Merge pull request #2748 from TheBlueMatt/2023-11-2675-followups
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 9 Jan 2024 23:22:48 +0000 (23:22 +0000)
committer GitHub <noreply@github.com>
Tue, 9 Jan 2024 23:22:48 +0000 (23:22 +0000)
Doc and style followups from #2675

98 files changed:
.github/workflows/build.yml
CHANGELOG.md
CONTRIBUTING.md
Cargo.toml
bench/Cargo.toml
bench/benches/bench.rs
ci/check-cfg-flags.py [new file with mode: 0755]
ci/ci-tests.sh
fuzz/Cargo.toml
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/onion_hop_data.rs
fuzz/src/onion_message.rs
fuzz/src/utils/test_logger.rs
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/convert.rs
lightning-block-sync/src/gossip.rs
lightning-block-sync/src/init.rs
lightning-block-sync/src/lib.rs
lightning-custom-message/Cargo.toml
lightning-invoice/Cargo.toml
lightning-invoice/fuzz/Cargo.toml
lightning-invoice/fuzz/fuzz_targets/serde_data_part.rs
lightning-invoice/src/de.rs
lightning-invoice/src/lib.rs
lightning-invoice/src/payment.rs
lightning-invoice/src/sync.rs [deleted file]
lightning-invoice/src/utils.rs
lightning-invoice/tests/ser_de.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning-persister/Cargo.toml
lightning-persister/src/fs_store.rs
lightning-persister/src/lib.rs
lightning-rapid-gossip-sync/Cargo.toml
lightning-rapid-gossip-sync/src/lib.rs
lightning-transaction-sync/Cargo.toml
lightning-transaction-sync/src/electrum.rs
lightning-transaction-sync/src/esplora.rs
lightning-transaction-sync/src/lib.rs
lightning/Cargo.toml
lightning/src/blinded_path/mod.rs
lightning/src/blinded_path/payment.rs
lightning/src/chain/chaininterface.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/mod.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/package.rs
lightning/src/events/bump_transaction.rs
lightning/src/events/mod.rs
lightning/src/lib.rs
lightning/src/ln/blinded_payment_tests.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_payment.rs [new file with mode: 0644]
lightning/src/ln/onion_utils.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/offers/invoice.rs
lightning/src/offers/invoice_request.rs
lightning/src/onion_message/functional_tests.rs
lightning/src/onion_message/messenger.rs
lightning/src/onion_message/offers.rs
lightning/src/onion_message/packet.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/sign/ecdsa.rs [new file with mode: 0644]
lightning/src/sign/mod.rs
lightning/src/sign/taproot.rs [new file with mode: 0644]
lightning/src/sign/type_resolver.rs
lightning/src/sync/nostd_sync.rs
lightning/src/util/chacha20.rs
lightning/src/util/crypto.rs
lightning/src/util/logger.rs
lightning/src/util/macro_logger.rs
lightning/src/util/persist.rs
lightning/src/util/ser.rs
lightning/src/util/ser_macros.rs
lightning/src/util/test_channel_signer.rs
lightning/src/util/test_utils.rs
msrv-no-dev-deps-check/Cargo.toml
no-std-check/Cargo.toml
pending_changelog/113-channel-ser-compat.txt [deleted file]
pending_changelog/electrum.txt [deleted file]

index 00ef76f787e188e0e23fc94b2244bb1cbca8673f..ad5e1fc517bf4e5a706cbc90070ef407407e9cb5 100644 (file)
@@ -18,19 +18,7 @@ jobs:
       fail-fast: false
       matrix:
         platform: [ ubuntu-latest, windows-latest, macos-latest ]
-        toolchain: [ stable, beta ]
-        include:
-          - toolchain: stable
-            platform: ubuntu-latest
-          # 1.48.0 is the MSRV for all crates except lightning-transaction-sync and Win/Mac
-          - toolchain: 1.48.0
-            platform: ubuntu-latest
-          # Windows requires 1.49.0 because that's the MSRV for supported Tokio
-          - toolchain: 1.49.0
-            platform: windows-latest
-          # MacOS-latest requires 1.54.0 because that's what's required for linking to work properly
-          - toolchain: 1.54.0
-            platform: macos-latest
+        toolchain: [ stable, beta, 1.63.0 ] # 1.63.0 is the MSRV for all crates.
     runs-on: ${{ matrix.platform }}
     steps:
       - name: Checkout source code
@@ -44,11 +32,16 @@ jobs:
         run: |
           rustup target add thumbv7m-none-eabi
           sudo apt-get -y install gcc-arm-none-eabi
+      - name: Check for unknown cfg tags
+        run: ci/check-cfg-flags.py
       - name: shellcheck the CI script
         if: "matrix.platform == 'ubuntu-latest'"
         run: |
           sudo apt-get -y install shellcheck
           shellcheck ci/ci-tests.sh
+      - name: Set RUSTFLAGS to deny warnings
+        if: "matrix.toolchain == '1.63.0'"
+        run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV"
       - name: Run CI script
         shell: bash # Default on Winblows is powershell
         run: CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh
@@ -168,13 +161,13 @@ jobs:
         run: |
           cargo check --release
           cargo check --no-default-features --features=no-std --release
-          cargo check --no-default-features --features=futures --release
+          cargo check --no-default-features --features=futures,std --release
           cargo doc --release
       - name: Run cargo check for Taproot build.
         run: |
           cargo check --release
           cargo check --no-default-features --features=no-std --release
-          cargo check --no-default-features --features=futures --release
+          cargo check --no-default-features --features=futures,std --release
           cargo doc --release
         env:
           RUSTFLAGS: '--cfg=taproot'
index ee3283b293002c098ce5c011f064c5b1e34b6e18..157ebde756b00e6d7358bc590deed1316058fb39 100644 (file)
@@ -1,3 +1,122 @@
+# 0.0.119 - Dec 15, 2023 - "Spring Cleaning for Christmas"
+
+## API Updates
+ * The LDK crate ecosystem MSRV has been increased to 1.63 (#2681).
+ * The `bitcoin` dependency has been updated to version 0.30 (#2740).
+ * `lightning-invoice::payment::*` have been replaced with parameter generation
+   via `payment_parameters_from[_zero_amount]_invoice` (#2727).
+ * `{CoinSelection,Wallet}Source::sign_tx` are now `sign_psbt`, providing more
+   information, incl spent outputs, about the transaction being signed (#2775).
+ * Logger `Record`s now include `channel_id` and `peer_id` fields. These are
+   opportunistically filled in when a log record is specific to a given channel
+   and/or peer, and may occasionally be spuriously empty (#2314).
+ * When handling send or reply onion messages (e.g. for BOLT12 payments), a new
+   `Event::ConnectionNeeded` may be raised, indicating a direct connection
+   should be made to a payee or an introduction point. This event is expected to
+   be removed once onion message forwarding is widespread in the network (#2723)
+ * Scoring data decay now happens via `ScoreUpdate::time_passed`, called from
+   `lightning-background-processor`. `process_events_async` now takes a new
+   time-fetch function, and `ScoreUpdate` methods now take the current time as a
+   `Duration` argument. This avoids fetching time during pathfinding (#2656).
+ * Receiving payments to multi-hop blinded paths is now supported (#2688).
+ * `MessageRouter` and `Router` now feature methods to generate blinded paths to
+   the local node for incoming messages and payments. `Router` now extends
+   `MessageRouter`, and both are used in `ChannelManager` when processing or
+   creating BOLT12 structures to generate multi-hop blinded paths (#1781).
+ * `lightning-transaction-sync` now supports Electrum-based sync (#2685).
+ * `Confirm::get_relevant_txids` now returns the height at which a transaction
+   was confirmed. This can be used to assist in reorg detection (#2685).
+ * `ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee` has been removed.
+   Non-anchor channel feerates are bounded indirectly through
+   `ChannelConfig::max_dust_htlc_exposure` (#2696).
+ * `lightning-invoice` `Description`s now rely on `UntrustedString` for
+   sanitization (#2730).
+ * `ScoreLookUp::channel_penalty_msat` now uses `CandidateRouteHop` (#2551).
+ * The `EcdsaChannelSigner` trait was moved to `lightning::sign::ecdsa` (#2512).
+ * `SignerProvider::get_destination_script` now takes `channel_keys_id` (#2744)
+ * `SpendableOutputDescriptor::StaticOutput` now has `channel_keys_id` (#2749).
+ * `EcdsaChannelSigner::sign_counterparty_commitment` now takes HTLC preimages
+   for both inbound and outbound HTLCs (#2753).
+ * `ClaimedHTLC` now includes a `counterparty_skimmed_fee_msat` field (#2715).
+ * `peel_payment_onion` was added to decode an encrypted onion for a payment
+   without receiving an HTLC. This allows for stateless verification of if a
+   theoretical payment would be accepted prior to receipt (#2700).
+ * `create_payment_onion` was added to construct an encrypted onion for a
+   payment path without sending an HTLC immediately (#2677).
+ * Various keys used in channels are now wrapped to provide type-safety for
+   specific usages of the keys (#2675).
+ * `TaggedHash` now includes the raw `tag` and `merkle_root` (#2687).
+ * `Offer::is_expired_no_std` was added (#2689).
+ * `PaymentPurpose::preimage()` was added (#2768).
+ * `temporary_channel_id` can now be specified in `create_channel` (#2699).
+ * Wire definitions for splicing messages were added (#2544).
+ * Various `lightning-invoice` structs now impl `Display`, now have pub fields,
+   or impl `From` (#2730).
+ * The `Hash` trait is now implemented for more structs, incl P2P msgs (#2716).
+
+## Performance Improvements
+ * Memory allocations (though not memory usage) have been substantially reduced,
+   meaning less overhead and hopefully less memory fragmentation (#2708, #2779).
+
+## Bug Fixes
+ * Since 0.0.117, calling `close_channel*` on a channel which has not yet been
+   funded would previously result in an infinite loop and hang (#2760).
+ * Since 0.0.116, sending payments requiring data in the onion for the recipient
+   which was too large for the onion may have caused corruption which resulted
+   in payment failure (#2752).
+ * Cooperative channel closure on channels with remaining output HTLCs may have
+   spuriously force-closed (#2529).
+ * In LDK versions 0.0.116 through 0.0.118, in rare cases where skimmed fees are
+   present on shutdown the `ChannelManager` may fail to deserialize (#2735).
+ * `ChannelConfig::max_dust_htlc_exposure` values which, converted to absolute
+   fees, exceeded 2^63 - 1 would result in an overflow and could lead to
+   spurious payment failures or channel closures (#2722).
+ * In cases where LDK is operating with provably-stale state, it panics to
+   avoid funds loss. This may not have happened in cases where LDK was behind
+   only exactly one state, leading instead to a revoked broadcast and funds
+   loss (#2721).
+ * Fixed a bug where decoding `Txid`s from Bitcoin Core JSON-RPC responses using
+   `lightning-block-sync` would not properly byte-swap the hash. Note that LDK
+   does not use this API internally (#2796).
+
+## Backwards Compatibility
+ * `ChannelManager`s written with LDK 0.0.119 are no longer readable by versions
+   of LDK prior to 0.0.113. Users wishing to downgrade to LDK 0.0.112 or before
+   can read an 0.0.119-serialized `ChannelManager` with a version of LDK from
+   0.0.113 to 0.0.118, re-serialize it, and then downgrade (#2708).
+ * Nodes that upgrade to 0.0.119 and subsequently downgrade after receiving a
+   payment to a blinded path may leak recipient information if one or more of
+   those HTLCs later fails (#2688).
+ * Similarly, forwarding a blinded HTLC and subsequently downgrading to an LDK
+   version prior to 0.0.119 may result in leaking the path information to the
+   payment sender (#2540).
+
+In total, this release features 148 files changed, 13780 insertions, 6279
+deletions in 280 commits from 22 authors, in alphabetical order:
+ * Arik Sosman
+ * Chris Waterson
+ * Elias Rohrer
+ * Evan Feenstra
+ * Gursharan Singh
+ * Jeffrey Czyz
+ * John Cantrell
+ * Lalitmohansharma1
+ * Matt Corallo
+ * Matthew Rheaume
+ * Orbital
+ * Rachel Malonson
+ * Valentine Wallace
+ * Willem Van Lint
+ * Wilmer Paulino
+ * alexanderwiederin
+ * benthecarman
+ * henghonglee
+ * jbesraa
+ * olegkubrakov
+ * optout
+ * shaavan
+
+
 # 0.0.118 - Oct 23, 2023 - "Just the Twelve Sinks"
 
 ## API Updates
index 350415af24cc0ad86d200a0199797c53a0518572..78c515007d03a75ba2b0261d25ebc74f46531244 100644 (file)
@@ -88,7 +88,7 @@ be covered by functional tests.
 When refactoring, structure your PR to make it easy to review and don't
 hesitate to split it into multiple small, focused PRs.
 
-The Minimum Supported Rust Version (MSRV) currently is 1.48.0 (enforced by
+The Minimum Supported Rust Version (MSRV) currently is 1.63.0 (enforced by
 our GitHub Actions). We support reading serialized LDK objects written by any
 version of LDK 0.0.99 and above. We support LDK versions 0.0.113 and above
 reading serialized LDK objects written by modern LDK. Any expected issues with
index 8614cb48c1f15207023d9fd00b952600804d4e1b..a12f6ff9ff3de2ded9fbc368029aba53f945f066 100644 (file)
@@ -1,4 +1,5 @@
 [workspace]
+resolver = "2"
 
 members = [
     "lightning",
@@ -7,11 +8,11 @@ members = [
     "lightning-net-tokio",
     "lightning-persister",
     "lightning-background-processor",
-    "lightning-rapid-gossip-sync"
+    "lightning-rapid-gossip-sync",
+    "lightning-custom-message",
 ]
 
 exclude = [
-    "lightning-custom-message",
     "lightning-transaction-sync",
     "no-std-check",
     "msrv-no-dev-deps-check",
index e582d29da8108981ac4b42cf712e5d04624ed854..05354890c2a8fb4f5f0e9233fa273d444aac7cd9 100644 (file)
@@ -2,7 +2,7 @@
 name = "lightning-bench"
 version = "0.0.1"
 authors = ["Matt Corallo"]
-edition = "2018"
+edition = "2021"
 
 [[bench]]
 name = "bench"
index eaa3fcec50c85188b2350ef39a08f6dd01ae86ae..b854ffb93cef999cb96ef93d1650b71f024b8ef9 100644 (file)
@@ -21,5 +21,6 @@ criterion_group!(benches,
        lightning_persister::fs_store::bench::bench_sends,
        lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
        lightning::routing::gossip::benches::read_network_graph,
-       lightning::routing::gossip::benches::write_network_graph);
+       lightning::routing::gossip::benches::write_network_graph,
+       lightning::routing::scoring::benches::decay_100k_channel_bounds);
 criterion_main!(benches);
diff --git a/ci/check-cfg-flags.py b/ci/check-cfg-flags.py
new file mode 100755 (executable)
index 0000000..277ae10
--- /dev/null
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3
+# Rust is fairly relaxed in checking the validity of arguments passed to #[cfg].
+# While it should probably be more strict when checking features, it cannot be
+# strict when checking loose cfg tags, because those can be anything and are
+# simply passed to rustc via unconstrained arguments.
+#
+# Thus, we do it for rustc manually, by scanning all our source and checking
+# that all our cfg tags match a known cfg tag.
+import sys, glob, re
+
+def check_feature(feature):
+    if feature == "std":
+        pass
+    elif feature == "no-std":
+        pass
+    elif feature == "hashbrown":
+        pass
+    elif feature == "backtrace":
+        pass
+    elif feature == "grind_signatures":
+        pass
+    elif feature == "unsafe_revoked_tx_signing":
+        pass
+    elif feature == "futures":
+        pass
+    elif feature == "tokio":
+        pass
+    elif feature == "rest-client":
+        pass
+    elif feature == "rpc-client":
+        pass
+    elif feature == "serde":
+        pass
+    elif feature == "esplora-blocking":
+        pass
+    elif feature == "esplora-async":
+        pass
+    elif feature == "async-interface":
+        pass
+    elif feature == "electrum":
+        pass
+    elif feature == "time":
+        pass
+    elif feature == "_test_utils":
+        pass
+    elif feature == "_test_vectors":
+        pass
+    elif feature == "afl":
+        pass
+    elif feature == "honggfuzz":
+        pass
+    elif feature == "libfuzzer_fuzz":
+        pass
+    elif feature == "stdin_fuzz":
+        pass
+    elif feature == "max_level_off":
+        pass
+    elif feature == "max_level_error":
+        pass
+    elif feature == "max_level_warn":
+        pass
+    elif feature == "max_level_info":
+        pass
+    elif feature == "max_level_debug":
+        pass
+    elif feature == "max_level_trace":
+        pass
+    else:
+        print("Bad feature: " + feature)
+        assert False
+
+def check_target_os(os):
+    if os == "windows":
+        pass
+    else:
+        assert False
+
+def check_cfg_tag(cfg):
+    if cfg == "fuzzing":
+        pass
+    elif cfg == "test":
+        pass
+    elif cfg == "debug_assertions":
+        pass
+    elif cfg == "c_bindings":
+        pass
+    elif cfg == "ldk_bench":
+        pass
+    elif cfg == "taproot":
+        pass
+    elif cfg == "async_signing":
+        pass
+    elif cfg == "require_route_graph_test":
+        pass
+    else:
+        print("Bad cfg tag: " + cfg)
+        assert False
+
+def check_cfg_args(cfg):
+    if cfg.startswith("all(") or cfg.startswith("any(") or cfg.startswith("not("):
+        brackets = 1
+        pos = 4
+        while pos < len(cfg):
+            if cfg[pos] == "(":
+                brackets += 1
+            elif cfg[pos] == ")":
+                brackets -= 1
+                if brackets == 0:
+                    check_cfg_args(cfg[4:pos])
+                    if pos + 1 != len(cfg):
+                        assert cfg[pos + 1] == ","
+                        check_cfg_args(cfg[pos + 2:].strip())
+                    return
+            pos += 1
+        assert False
+        assert(cfg.endswith(")"))
+        check_cfg_args(cfg[4:len(cfg)-1])
+    else:
+        parts = [part.strip() for part in cfg.split(",", 1)]
+        if len(parts) > 1:
+            for part in parts:
+                check_cfg_args(part)
+        elif cfg.startswith("feature") or cfg.startswith("target_os") or cfg.startswith("target_pointer_width"):
+            arg = cfg
+            if cfg.startswith("feature"):
+                arg = arg[7:].strip()
+            elif cfg.startswith("target_os"):
+                arg = arg[9:].strip()
+            else:
+                arg = arg[20:].strip()
+            assert arg.startswith("=")
+            arg = arg[1:].strip()
+            assert arg.startswith("\"")
+            assert arg.endswith("\"")
+            arg = arg[1:len(arg)-1]
+            assert not "\"" in arg
+            if cfg.startswith("feature"):
+                check_feature(arg)
+            elif cfg.startswith("target_os"):
+                check_target_os(arg)
+            else:
+                assert arg == "32" or arg == "64"
+        else:
+            check_cfg_tag(cfg.strip())
+
+cfg_regex = re.compile("#\[cfg\((.*)\)\]")
+for path in glob.glob(sys.path[0] + "/../**/*.rs", recursive = True):
+    with open(path, "r") as file:
+        while True:
+            line = file.readline()
+            if not line:
+                break
+            if "#[cfg(" in line:
+                if not line.strip().startswith("//"):
+                    cfg_part = cfg_regex.match(line.strip()).group(1)
+                    check_cfg_args(cfg_part)
index 2db32a1081c2c07d5a4f7e30c03e40ce79fbe9ff..3eccc48798dc6e4b5f3fccd0b0c29003be23b0c8 100755 (executable)
@@ -8,38 +8,60 @@ HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')"
 # which we do here.
 # Further crates which appear only as dev-dependencies are pinned further down.
 function PIN_RELEASE_DEPS {
-       # Tokio MSRV on versions 1.17 through 1.26 is rustc 1.49. Above 1.26 MSRV is 1.56.
-       [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.1" --verbose
-       [[ "$RUSTC_MINOR_VERSION" -gt 48  &&  "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.25.1" --verbose
-
-       # Sadly the log crate is always a dependency of tokio until 1.20, and has no reasonable MSRV guarantees
-       [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p log --precise "0.4.18" --verbose
-
-       # The serde_json crate switched to Rust edition 2021 starting with v1.0.101, i.e., has MSRV of 1.56
-       [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p serde_json --precise "1.0.100" --verbose
-
        return 0 # Don't fail the script if our rustc is higher than the last check
 }
 
-PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
-
-# The addr2line v0.20 crate (a dependency of `backtrace` starting with 0.3.68) relies on 1.55+
-[ "$RUSTC_MINOR_VERSION" -lt 55 ] && cargo update -p backtrace --precise "0.3.67" --verbose
-
-# The quote crate switched to Rust edition 2021 starting with v1.0.31, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p quote --precise "1.0.30" --verbose
+# The tests of `lightning-transaction-sync` require `electrs` and `bitcoind`
+# binaries. Here, we download the binaries, validate them, and export their
+# location via `ELECTRS_EXE`/`BITCOIND_EXE` which will be used by the
+# `electrsd`/`bitcoind` crates in our tests.
+function DOWNLOAD_ELECTRS_AND_BITCOIND {
+       ELECTRS_DL_ENDPOINT="https://github.com/RCasatta/electrsd/releases/download/electrs_releases"
+       ELECTRS_VERSION="esplora_a33e97e1a1fc63fa9c20a116bb92579bbf43b254"
+       BITCOIND_DL_ENDPOINT="https://bitcoincore.org/bin/"
+       BITCOIND_VERSION="25.1"
+       if [[ "$HOST_PLATFORM" == *linux* ]]; then
+               ELECTRS_DL_FILE_NAME=electrs_linux_"$ELECTRS_VERSION".zip
+               ELECTRS_DL_HASH="865e26a96e8df77df01d96f2f569dcf9622fc87a8d99a9b8fe30861a4db9ddf1"
+               BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-linux-gnu.tar.gz
+               BITCOIND_DL_HASH="a978c407b497a727f0444156e397b50491ce862d1f906fef9b521415b3611c8b"
+       elif [[ "$HOST_PLATFORM" == *darwin* ]]; then
+               ELECTRS_DL_FILE_NAME=electrs_macos_"$ELECTRS_VERSION".zip
+               ELECTRS_DL_HASH="2d5ff149e8a2482d3658e9b386830dfc40c8fbd7c175ca7cbac58240a9505bcd"
+               BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-apple-darwin.tar.gz
+               BITCOIND_DL_HASH="1acfde0ec3128381b83e3e5f54d1c7907871d324549129592144dd12a821eff1"
+       else
+               echo -e "\n\nUnsupported platform. Exiting.."
+               exit 1
+       fi
+
+       DL_TMP_DIR=$(mktemp -d)
+       trap 'rm -rf -- "$DL_TMP_DIR"' EXIT
+
+       pushd "$DL_TMP_DIR"
+       ELECTRS_DL_URL="$ELECTRS_DL_ENDPOINT"/"$ELECTRS_DL_FILE_NAME"
+       curl -L -o "$ELECTRS_DL_FILE_NAME" "$ELECTRS_DL_URL"
+       echo "$ELECTRS_DL_HASH  $ELECTRS_DL_FILE_NAME"|shasum -a 256 -c
+       unzip "$ELECTRS_DL_FILE_NAME"
+       export ELECTRS_EXE="$DL_TMP_DIR"/electrs
+       chmod +x "$ELECTRS_EXE"
+
+       BITCOIND_DL_URL="$BITCOIND_DL_ENDPOINT"/bitcoin-core-"$BITCOIND_VERSION"/"$BITCOIND_DL_FILE_NAME"
+       curl -L -o "$BITCOIND_DL_FILE_NAME" "$BITCOIND_DL_URL"
+       echo "$BITCOIND_DL_HASH  $BITCOIND_DL_FILE_NAME"|shasum -a 256 -c
+       tar xzf "$BITCOIND_DL_FILE_NAME"
+       export BITCOIND_EXE="$DL_TMP_DIR"/bitcoin-"$BITCOIND_VERSION"/bin/bitcoind
+       chmod +x "$BITCOIND_EXE"
+       popd
+}
 
-# The syn crate depends on too-new proc-macro2 starting with v2.0.33, i.e., has MSRV of 1.56
-if [ "$RUSTC_MINOR_VERSION" -lt 56 ]; then
-       SYN_2_DEP=$(grep -o '"syn 2.*' Cargo.lock | tr -d '",' | tr ' ' ':')
-       cargo update -p "$SYN_2_DEP" --precise "2.0.32" --verbose
-fi
+PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
 
-# The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
+# Starting with version 1.10.0, the `regex` crate has an MSRV of rustc 1.65.0.
+[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
 
-# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
-[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
+# The addr2line v0.21 crate (a dependency of `backtrace` starting with 0.3.69) relies on rustc 1.65
+[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p backtrace --precise "0.3.68" --verbose
 
 export RUST_BACKTRACE=1
 
@@ -59,17 +81,28 @@ cargo test --verbose --color always --features rpc-client,rest-client,tokio
 cargo check --verbose --color always --features rpc-client,rest-client,tokio
 popd
 
-if [[ $RUSTC_MINOR_VERSION -gt 67 && "$HOST_PLATFORM" != *windows* ]]; then
+if [[ "$HOST_PLATFORM" != *windows* ]]; then
        echo -e "\n\nBuilding and testing Transaction Sync Clients with features"
        pushd lightning-transaction-sync
-       cargo test --verbose --color always --features esplora-blocking
-       cargo check --verbose --color always --features esplora-blocking
-       cargo test --verbose --color always --features esplora-async
-       cargo check --verbose --color always --features esplora-async
-       cargo test --verbose --color always --features esplora-async-https
-       cargo check --verbose --color always --features esplora-async-https
-       cargo test --verbose --color always --features electrum
-       cargo check --verbose --color always --features electrum
+
+       # reqwest 0.11.21 had a regression that broke its 1.63.0 MSRV
+       [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p reqwest --precise "0.11.20" --verbose
+       # Starting with version 1.10.0, the `regex` crate has an MSRV of rustc 1.65.0.
+       [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
+       # Starting with version 0.5.9 (there is no .6-.8), the `home` crate has an MSRV of rustc 1.70.0.
+       [ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p home --precise "0.5.5" --verbose
+
+       DOWNLOAD_ELECTRS_AND_BITCOIND
+
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features esplora-blocking
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features esplora-blocking
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features esplora-async
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features esplora-async
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features esplora-async-https
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features esplora-async-https
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features electrum
+       RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features electrum
+
        popd
 fi
 
@@ -78,39 +111,36 @@ pushd lightning-background-processor
 cargo test --verbose --color always --features futures
 popd
 
-if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then
-       echo -e "\n\nTest Custom Message Macros"
-       pushd lightning-custom-message
-       cargo test --verbose --color always
-       [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
-       popd
-fi
+echo -e "\n\nTest Custom Message Macros"
+pushd lightning-custom-message
+cargo test --verbose --color always
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
+popd
 
-if [ "$RUSTC_MINOR_VERSION" -gt 51 ]; then # Current `object` MSRV, subject to change
-       echo -e "\n\nTest backtrace-debug builds"
-       pushd lightning
-       cargo test --verbose --color always --features backtrace
-       popd
-fi
+echo -e "\n\nTest backtrace-debug builds"
+pushd lightning
+cargo test --verbose --color always --features backtrace
+popd
 
 echo -e "\n\nBuilding with all Log-Limiting features"
 pushd lightning
 grep '^max_level_' Cargo.toml | awk '{ print $1 }'| while read -r FEATURE; do
-       cargo check --verbose --color always --features "$FEATURE"
+       RUSTFLAGS="$RUSTFLAGS -A unused_variables -A unused_macros -A unused_imports -A dead_code" cargo check --verbose --color always --features "$FEATURE"
 done
 popd
 
 echo -e "\n\nTesting no-std flags in various combinations"
 for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
-       [ "$RUSTC_MINOR_VERSION" -gt 50 ] && cargo test -p $DIR --verbose --color always --no-default-features --features no-std
+       cargo test -p $DIR --verbose --color always --no-default-features --features no-std
        # check if there is a conflict between no-std and the default std feature
-       [ "$RUSTC_MINOR_VERSION" -gt 50 ] && cargo test -p $DIR --verbose --color always --features no-std
+       cargo test -p $DIR --verbose --color always --features no-std
 done
+
 for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
        # check if there is a conflict between no-std and the c_bindings cfg
-       [ "$RUSTC_MINOR_VERSION" -gt 50 ] && RUSTFLAGS="--cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features --features=no-std
+       RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features --features=no-std
 done
-RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always
+RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test --verbose --color always
 
 # Note that outbound_commitment_test only runs in this mode because of hardcoded signature values
 pushd lightning
@@ -125,16 +155,7 @@ popd
 echo -e "\n\nTesting no-std build on a downstream no-std crate"
 # check no-std compatibility across dependencies
 pushd no-std-check
-if [[ $RUSTC_MINOR_VERSION -gt 67 ]]; then
-       # lightning-transaction-sync's MSRV is 1.67
-       cargo check --verbose --color always --features lightning-transaction-sync
-else
-       # The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
-       # This is currently only a release dependency via core2, which we intend to work with
-       # rust-bitcoin to remove soon.
-       [ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
-       cargo check --verbose --color always
-fi
+cargo check --verbose --color always --features lightning-transaction-sync
 [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
 popd
 
@@ -152,7 +173,6 @@ if [ -f "$(which arm-none-eabi-gcc)" ]; then
        popd
 fi
 
-echo -e "\n\nTest Taproot builds"
-pushd lightning
-RUSTFLAGS="$RUSTFLAGS --cfg=taproot" cargo test --verbose --color always -p lightning
-popd
+echo -e "\n\nTest cfg-flag builds"
+RUSTFLAGS="--cfg=taproot" cargo test --verbose --color always -p lightning
+RUSTFLAGS="--cfg=async_signing" cargo test --verbose --color always -p lightning
index 573096efdfcfb3983fa94448c57453e9b21cdde4..0c279a015c7398fe2ba0d8ba5e5b7ec49f2a154a 100644 (file)
@@ -3,7 +3,7 @@ name = "lightning-fuzz"
 version = "0.0.1"
 authors = ["Automatically generated"]
 publish = false
-edition = "2018"
+edition = "2021"
 # Because the function is unused it gets dropped before we link lightning, so
 # we have to duplicate build.rs here. Note that this is only required for
 # fuzzing mode.
index af0c64d88aea7f76f751ebc24a15bade136919ed..7a32434b86fff16dfcd58e76e9495095c6c4098f 100644 (file)
@@ -30,6 +30,8 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
+use lightning::blinded_path::BlindedPath;
+use lightning::blinded_path::payment::ReceiveTlvs;
 use lightning::chain;
 use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
@@ -44,8 +46,9 @@ use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::ln::script::ShutdownScript;
 use lightning::ln::functional_test_utils::*;
-use lightning::offers::invoice::UnsignedBolt12Invoice;
+use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
 use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::logger::Logger;
@@ -56,7 +59,7 @@ use lightning::routing::router::{InFlightHtlcs, Path, Route, RouteHop, RoutePara
 use crate::utils::test_logger::{self, Output};
 use crate::utils::test_persister::TestPersister;
 
-use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1};
+use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1, self};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
 use bitcoin::secp256k1::schnorr;
@@ -99,6 +102,32 @@ impl Router for FuzzRouter {
                        action: msgs::ErrorAction::IgnoreError
                })
        }
+
+       fn create_blinded_payment_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+               _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+               unreachable!()
+       }
+}
+
+impl MessageRouter for FuzzRouter {
+       fn find_path(
+               &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+       ) -> Result<OnionMessagePath, ()> {
+               unreachable!()
+       }
+
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+               _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()> {
+               unreachable!()
+       }
 }
 
 pub struct TestBroadcaster {}
@@ -230,14 +259,16 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = TestChannelSigner;
+       type EcdsaSigner = TestChannelSigner;
+       #[cfg(taproot)]
+       type TaprootSigner = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8;
                [id; 32]
        }
 
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::Signer {
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
                let secp_ctx = Secp256k1::signing_only();
                let id = channel_keys_id[0];
                let keys = InMemorySigner::new(
@@ -256,7 +287,7 @@ impl SignerProvider for KeyProvider {
                TestChannelSigner::new_with_revoked(keys, revoked_commitment, false)
        }
 
-       fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
+       fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> {
                let mut reader = std::io::Cursor::new(buffer);
 
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
@@ -270,7 +301,7 @@ impl SignerProvider for KeyProvider {
                })
        }
 
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()> {
+       fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
                let secp_ctx = Secp256k1::signing_only();
                let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_secret[31]]).unwrap();
                let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
index 8a597a614772a46d711c34a4ba0758eea4cd2d64..a2ce98cf4d22c9bebaf40ff895b98f88d7b8da2e 100644 (file)
@@ -28,6 +28,8 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
 
+use lightning::blinded_path::BlindedPath;
+use lightning::blinded_path::payment::ReceiveTlvs;
 use lightning::chain;
 use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen};
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
@@ -41,8 +43,9 @@ use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,Ig
 use lightning::ln::msgs::{self, DecodeError};
 use lightning::ln::script::ShutdownScript;
 use lightning::ln::functional_test_utils::*;
-use lightning::offers::invoice::UnsignedBolt12Invoice;
+use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
 use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
 use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
@@ -55,7 +58,7 @@ use lightning::util::ser::{ReadableArgs, Writeable};
 use crate::utils::test_logger;
 use crate::utils::test_persister::TestPersister;
 
-use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1};
+use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1, self};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
 use bitcoin::secp256k1::schnorr;
@@ -142,6 +145,32 @@ impl Router for FuzzRouter {
                        action: msgs::ErrorAction::IgnoreError
                })
        }
+
+       fn create_blinded_payment_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+               _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+               unreachable!()
+       }
+}
+
+impl MessageRouter for FuzzRouter {
+       fn find_path(
+               &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+       ) -> Result<OnionMessagePath, ()> {
+               unreachable!()
+       }
+
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+               _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()> {
+               unreachable!()
+       }
 }
 
 struct TestBroadcaster {
@@ -340,7 +369,9 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = TestChannelSigner;
+       type EcdsaSigner = TestChannelSigner;
+       #[cfg(taproot)]
+       type TaprootSigner = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
@@ -348,7 +379,7 @@ impl SignerProvider for KeyProvider {
                [ctr; 32]
        }
 
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::Signer {
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
                let secp_ctx = Secp256k1::signing_only();
                let ctr = channel_keys_id[0];
                let (inbound, state) = self.signer_state.borrow().get(&ctr).unwrap().clone();
@@ -392,7 +423,7 @@ impl SignerProvider for KeyProvider {
                ))
        }
 
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()> {
+       fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
                let secp_ctx = Secp256k1::signing_only();
                let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
                let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
@@ -726,7 +757,7 @@ mod tests {
                pub lines: Mutex<HashMap<(String, String), usize>>,
        }
        impl Logger for TrackingLogger {
-               fn log(&self, record: &Record) {
+               fn log(&self, record: Record) {
                        *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
                        println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
                }
index cc80ccf932088423aa37e1a637c0b85e22322602..e7f51b9916775244cb73a87b3b2de57fbe585efa 100644 (file)
@@ -16,16 +16,18 @@ use lightning::util::test_utils;
 #[inline]
 pub fn onion_hop_data_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
        use lightning::util::ser::ReadableArgs;
+       use bitcoin::secp256k1::PublicKey;
        let mut r = ::std::io::Cursor::new(data);
        let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
-       let _ =  <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestNodeSigner>>::read(&mut r, &&node_signer);
+       let _ =  <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestNodeSigner)>>::read(&mut r, (None, &&node_signer));
 }
 
 #[no_mangle]
 pub extern "C" fn onion_hop_data_run(data: *const u8, datalen: usize) {
        use lightning::util::ser::ReadableArgs;
+       use bitcoin::secp256k1::PublicKey;
        let data = unsafe { std::slice::from_raw_parts(data, datalen) };
        let mut r = ::std::io::Cursor::new(data);
        let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
-       let _ =  <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestNodeSigner>>::read(&mut r, &&node_signer);
+       let _ =  <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestNodeSigner)>>::read(&mut r, (None, &&node_signer));
 }
index c071d806e93aa42fb7bc653d2232f3dbf3491238..d2d60cfcf640593e2e38d264eac6eeb0c2abafad 100644 (file)
@@ -1,17 +1,18 @@
 // Imports that need to be added manually
 use bitcoin::bech32::u5;
 use bitcoin::blockdata::script::ScriptBuf;
-use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, self};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::schnorr;
 
-use lightning::sign::{Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
+use lightning::blinded_path::BlindedPath;
 use lightning::ln::features::InitFeatures;
 use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
 use lightning::ln::script::ShutdownScript;
 use lightning::offers::invoice::UnsignedBolt12Invoice;
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::sign::{Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
 use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::logger::Logger;
 use lightning::util::ser::{Readable, Writeable, Writer};
@@ -79,8 +80,18 @@ impl MessageRouter for TestMessageRouter {
                Ok(OnionMessagePath {
                        intermediate_nodes: vec![],
                        destination,
+                       first_node_addresses: None,
                })
        }
+
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+               _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()> {
+               unreachable!()
+       }
 }
 
 struct TestOffersMessageHandler {}
@@ -91,6 +102,7 @@ impl OffersMessageHandler for TestOffersMessageHandler {
        }
 }
 
+#[derive(Debug)]
 struct TestCustomMessage {}
 
 const CUSTOM_MESSAGE_TYPE: u64 = 4242;
@@ -189,17 +201,19 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = TestChannelSigner;
+       type EcdsaSigner = TestChannelSigner;
+       #[cfg(taproot)]
+       type TaprootSigner = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!() }
 
-       fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
+       fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
                unreachable!()
        }
 
        fn read_chan_signer(&self, _data: &[u8]) -> Result<TestChannelSigner, DecodeError> { unreachable!() }
 
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()> { unreachable!() }
+       fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> { unreachable!() }
 
        fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> { unreachable!() }
 }
@@ -216,7 +230,7 @@ mod tests {
                pub lines: Mutex<HashMap<(String, String), usize>>,
        }
        impl Logger for TrackingLogger {
-               fn log(&self, record: &Record) {
+               fn log(&self, record: Record) {
                        *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
                        println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
                }
@@ -263,9 +277,12 @@ mod tests {
                {
                        let log_entries = logger.lines.lock().unwrap();
                        assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(),
-                                               "Received an onion message with path_id None and a reply_path".to_string())), Some(&1));
+                                               "Received an onion message with path_id None and a reply_path: Custom(TestCustomMessage)"
+                                               .to_string())), Some(&1));
+                       assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(),
+                                               "Constructing onion message when responding to Custom onion message with path_id None: TestCustomMessage".to_string())), Some(&1));
                        assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(),
-                                               "Sending onion message when responding to Custom onion message with path_id None".to_string())), Some(&1));
+                                               "Buffered onion message when responding to Custom onion message with path_id None".to_string())), Some(&1));
                }
 
                let two_unblinded_hops_om = "\
index f8c96f99bd1c8ddb9c1f9b28d8d7b5fbe67cc27b..5e5817e23f1f92d18f5d83406db2cff23319491d 100644 (file)
@@ -56,7 +56,7 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> {
 }
 
 impl<Out: Output> Logger for TestLogger<Out> {
-       fn log(&self, record: &Record) {
+       fn log(&self, record: Record) {
                write!(LockedWriteAdapter(&self.out),
                        "{:<5} {} [{} : {}] {}\n", record.level.to_string(), self.id, record.module_path, record.line, record.args)
                        .unwrap();
index 933cbc466a677a8cd1f93e9d5e63219682477870..3832ef96922dce37c2ba9a9eecc8e32f76161dbb 100644 (file)
@@ -1,13 +1,13 @@
 [package]
 name = "lightning-background-processor"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Valentine Wallace <vwallace@protonmail.com>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities to perform required background tasks for Rust Lightning.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -22,11 +22,11 @@ default = ["std"]
 
 [dependencies]
 bitcoin = { version = "0.30.2", default-features = false }
-lightning = { version = "0.0.118", path = "../lightning", default-features = false }
-lightning-rapid-gossip-sync = { version = "0.0.118", path = "../lightning-rapid-gossip-sync", default-features = false }
+lightning = { version = "0.0.119", path = "../lightning", default-features = false }
+lightning-rapid-gossip-sync = { version = "0.0.119", path = "../lightning-rapid-gossip-sync", default-features = false }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
-lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
-lightning-invoice = { version = "0.26.0", path = "../lightning-invoice" }
-lightning-persister = { version = "0.0.118", path = "../lightning-persister" }
+tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
+lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
+lightning-invoice = { version = "0.27.0", path = "../lightning-invoice" }
+lightning-persister = { version = "0.0.119", path = "../lightning-persister" }
index 76252324efc7a37d0072a62e08ac4f171a3841ea..0f2c67538d65de59acb37da608ca0f48da78f227 100644 (file)
@@ -2,9 +2,8 @@
 //! running properly, and (2) either can or should be run in the background. See docs for
 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
@@ -28,8 +27,12 @@ use lightning::chain::chainmonitor::{ChainMonitor, Persist};
 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
 use lightning::events::{Event, PathFailure};
 #[cfg(feature = "std")]
-use lightning::events::{EventHandler, EventsProvider};
+use lightning::events::EventHandler;
+#[cfg(any(feature = "std", feature = "futures"))]
+use lightning::events::EventsProvider;
+
 use lightning::ln::channelmanager::ChannelManager;
+use lightning::ln::msgs::OnionMessageHandler;
 use lightning::ln::peer_handler::APeerManager;
 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::UtxoLookup;
@@ -104,11 +107,16 @@ const PING_TIMER: u64 = 30;
 #[cfg(test)]
 const PING_TIMER: u64 = 1;
 
+#[cfg(not(test))]
+const ONION_MESSAGE_HANDLER_TIMER: u64 = 10;
+#[cfg(test)]
+const ONION_MESSAGE_HANDLER_TIMER: u64 = 1;
+
 /// Prune the network graph of stale entries hourly.
 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
 
 #[cfg(not(test))]
-const SCORER_PERSIST_TIMER: u64 = 60 * 60;
+const SCORER_PERSIST_TIMER: u64 = 60 * 5;
 #[cfg(test)]
 const SCORER_PERSIST_TIMER: u64 = 1;
 
@@ -239,30 +247,30 @@ fn handle_network_graph_update<L: Deref>(
 /// Updates scorer based on event and returns whether an update occurred so we can decide whether
 /// to persist.
 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
-       scorer: &'a S, event: &Event
+       scorer: &'a S, event: &Event, duration_since_epoch: Duration,
 ) -> bool {
        match event {
                Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
                        let mut score = scorer.write_lock();
-                       score.payment_path_failed(path, *scid);
+                       score.payment_path_failed(path, *scid, duration_since_epoch);
                },
                Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
                        // Reached if the destination explicitly failed it back. We treat this as a successful probe
                        // because the payment made it all the way to the destination with sufficient liquidity.
                        let mut score = scorer.write_lock();
-                       score.probe_successful(path);
+                       score.probe_successful(path, duration_since_epoch);
                },
                Event::PaymentPathSuccessful { path, .. } => {
                        let mut score = scorer.write_lock();
-                       score.payment_path_successful(path);
+                       score.payment_path_successful(path, duration_since_epoch);
                },
                Event::ProbeSuccessful { path, .. } => {
                        let mut score = scorer.write_lock();
-                       score.probe_successful(path);
+                       score.probe_successful(path, duration_since_epoch);
                },
                Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
                        let mut score = scorer.write_lock();
-                       score.probe_failed(path, *scid);
+                       score.probe_failed(path, *scid, duration_since_epoch);
                },
                _ => return false,
        }
@@ -270,27 +278,31 @@ fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + Wri
 }
 
 macro_rules! define_run_body {
-       ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
-        $channel_manager: ident, $process_channel_manager_events: expr,
-        $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
-        $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
-        $check_slow_await: expr)
-       => { {
+       (
+               $persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
+               $channel_manager: ident, $process_channel_manager_events: expr,
+               $peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
+               $logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
+               $timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr,
+       ) => { {
                log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
                $channel_manager.timer_tick_occurred();
                log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
                $chain_monitor.rebroadcast_pending_claims();
 
                let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
+               let mut last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
                let mut last_ping_call = $get_timer(PING_TIMER);
                let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
                let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
                let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
                let mut have_pruned = false;
+               let mut have_decayed_scorer = false;
 
                loop {
                        $process_channel_manager_events;
                        $process_chain_monitor_events;
+                       $process_onion_message_handler_events;
 
                        // Note that the PeerManager::process_events may block on ChannelManager's locks,
                        // hence it comes last here. When the ChannelManager finishes whatever it's doing,
@@ -334,6 +346,11 @@ macro_rules! define_run_body {
                                $channel_manager.timer_tick_occurred();
                                last_freshness_call = $get_timer(FRESHNESS_TIMER);
                        }
+                       if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
+                               log_trace!($logger, "Calling OnionMessageHandler's timer_tick_occurred");
+                               $peer_manager.onion_message_handler().timer_tick_occurred();
+                               last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
+                       }
                        if await_slow {
                                // On various platforms, we may be starved of CPU cycles for several reasons.
                                // E.g. on iOS, if we've been in the background, we will be entirely paused.
@@ -370,11 +387,10 @@ macro_rules! define_run_body {
                        if should_prune {
                                // The network graph must not be pruned while rapid sync completion is pending
                                if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
-                                       #[cfg(feature = "std")] {
+                                       if let Some(duration_since_epoch) = $time_fetch() {
                                                log_trace!($logger, "Pruning and persisting network graph.");
-                                               network_graph.remove_stale_channels_and_tracking();
-                                       }
-                                       #[cfg(not(feature = "std"))] {
+                                               network_graph.remove_stale_channels_and_tracking_with_time(duration_since_epoch.as_secs());
+                                       } else {
                                                log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
                                                log_trace!($logger, "Persisting network graph.");
                                        }
@@ -389,9 +405,24 @@ macro_rules! define_run_body {
                                last_prune_call = $get_timer(prune_timer);
                        }
 
+                       if !have_decayed_scorer {
+                               if let Some(ref scorer) = $scorer {
+                                       if let Some(duration_since_epoch) = $time_fetch() {
+                                               log_trace!($logger, "Calling time_passed on scorer at startup");
+                                               scorer.write_lock().time_passed(duration_since_epoch);
+                                       }
+                               }
+                               have_decayed_scorer = true;
+                       }
+
                        if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
                                if let Some(ref scorer) = $scorer {
-                                       log_trace!($logger, "Persisting scorer");
+                                       if let Some(duration_since_epoch) = $time_fetch() {
+                                               log_trace!($logger, "Calling time_passed and persisting scorer");
+                                               scorer.write_lock().time_passed(duration_since_epoch);
+                                       } else {
+                                               log_trace!($logger, "Persisting scorer");
+                                       }
                                        if let Err(e) = $persister.persist_scorer(&scorer) {
                                                log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
                                        }
@@ -497,12 +528,16 @@ use core::task;
 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
 /// are hundreds or thousands of simultaneous process calls running.
 ///
+/// The `fetch_time` parameter should return the current wall clock time, if one is available. If
+/// no time is available, some features may be disabled, however the node will still operate fine.
+///
 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
 /// could setup `process_events_async` like this:
 /// ```
 /// # use lightning::io;
 /// # use std::sync::{Arc, RwLock};
 /// # use std::sync::atomic::{AtomicBool, Ordering};
+/// # use std::time::SystemTime;
 /// # use lightning_background_processor::{process_events_async, GossipSync};
 /// # struct MyStore {}
 /// # impl lightning::util::persist::KVStore for MyStore {
@@ -571,6 +606,7 @@ use core::task;
 ///                    Some(background_scorer),
 ///                    sleeper,
 ///                    mobile_interruptable_platform,
+///                    || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap())
 ///                    )
 ///                    .await
 ///                    .expect("Failed to process events");
@@ -599,25 +635,25 @@ pub async fn process_events_async<
        EventHandlerFuture: core::future::Future<Output = ()>,
        EventHandler: Fn(Event) -> EventHandlerFuture,
        PS: 'static + Deref + Send,
-       M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
+       M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
        CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
        PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
        RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
-       APM: APeerManager + Send + Sync,
-       PM: 'static + Deref<Target = APM> + Send + Sync,
+       PM: 'static + Deref + Send + Sync,
        S: 'static + Deref<Target = SC> + Send + Sync,
        SC: for<'b> WriteableScore<'b>,
        SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
-       Sleeper: Fn(Duration) -> SleepFuture
+       Sleeper: Fn(Duration) -> SleepFuture,
+       FetchTime: Fn() -> Option<Duration>,
 >(
        persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
        gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
-       sleeper: Sleeper, mobile_interruptable_platform: bool,
+       sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime,
 ) -> Result<(), lightning::io::Error>
 where
        UL::Target: 'static + UtxoLookup,
        CF::Target: 'static + chain::Filter,
-       CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: 'static + BroadcasterInterface,
        ES::Target: 'static + EntropySource,
        NS::Target: 'static + NodeSigner,
@@ -625,8 +661,9 @@ where
        F::Target: 'static + FeeEstimator,
        R::Target: 'static + Router,
        L::Target: 'static + Logger,
-       P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
+       P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
        PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
+       PM::Target: APeerManager + Send + Sync,
 {
        let mut should_break = false;
        let async_event_handler = |event| {
@@ -635,25 +672,30 @@ where
                let scorer = &scorer;
                let logger = &logger;
                let persister = &persister;
+               let fetch_time = &fetch_time;
                async move {
                        if let Some(network_graph) = network_graph {
                                handle_network_graph_update(network_graph, &event)
                        }
                        if let Some(ref scorer) = scorer {
-                               if update_scorer(scorer, &event) {
-                                       log_trace!(logger, "Persisting scorer after update");
-                                       if let Err(e) = persister.persist_scorer(&scorer) {
-                                               log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+                               if let Some(duration_since_epoch) = fetch_time() {
+                                       if update_scorer(scorer, &event, duration_since_epoch) {
+                                               log_trace!(logger, "Persisting scorer after update");
+                                               if let Err(e) = persister.persist_scorer(&scorer) {
+                                                       log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+                                               }
                                        }
                                }
                        }
                        event_handler(event).await;
                }
        };
-       define_run_body!(persister,
-               chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
+       define_run_body!(
+               persister, chain_monitor,
+               chain_monitor.process_pending_events_async(async_event_handler).await,
                channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
-               gossip_sync, peer_manager, logger, scorer, should_break, {
+               peer_manager, process_onion_message_handler_events_async(&peer_manager, async_event_handler).await,
+               gossip_sync, logger, scorer, should_break, {
                        let fut = Selector {
                                a: channel_manager.get_event_or_persistence_needed_future(),
                                b: chain_monitor.get_update_future(),
@@ -673,7 +715,27 @@ where
                                task::Poll::Ready(exit) => { should_break = exit; true },
                                task::Poll::Pending => false,
                        }
-               }, mobile_interruptable_platform)
+               }, mobile_interruptable_platform, fetch_time,
+       )
+}
+
+#[cfg(feature = "futures")]
+async fn process_onion_message_handler_events_async<
+       EventHandlerFuture: core::future::Future<Output = ()>,
+       EventHandler: Fn(Event) -> EventHandlerFuture,
+       PM: 'static + Deref + Send + Sync,
+>(
+       peer_manager: &PM, handler: EventHandler
+)
+where
+       PM::Target: APeerManager + Send + Sync,
+{
+       let events = core::cell::RefCell::new(Vec::new());
+       peer_manager.onion_message_handler().process_pending_events(&|e| events.borrow_mut().push(e));
+
+       for event in events.into_inner() {
+               handler(event).await
+       }
 }
 
 #[cfg(feature = "std")]
@@ -738,12 +800,11 @@ impl BackgroundProcessor {
                P: 'static + Deref + Send + Sync,
                EH: 'static + EventHandler + Send,
                PS: 'static + Deref + Send,
-               M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
+               M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
                CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
                PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
                RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
-               APM: APeerManager + Send + Sync,
-               PM: 'static + Deref<Target = APM> + Send + Sync,
+               PM: 'static + Deref + Send + Sync,
                S: 'static + Deref<Target = SC> + Send + Sync,
                SC: for <'b> WriteableScore<'b>,
        >(
@@ -753,7 +814,7 @@ impl BackgroundProcessor {
        where
                UL::Target: 'static + UtxoLookup,
                CF::Target: 'static + chain::Filter,
-               CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+               CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
                NS::Target: 'static + NodeSigner,
@@ -761,8 +822,9 @@ impl BackgroundProcessor {
                F::Target: 'static + FeeEstimator,
                R::Target: 'static + Router,
                L::Target: 'static + Logger,
-               P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
+               P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
                PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
+               PM::Target: APeerManager + Send + Sync,
        {
                let stop_thread = Arc::new(AtomicBool::new(false));
                let stop_thread_clone = stop_thread.clone();
@@ -773,7 +835,10 @@ impl BackgroundProcessor {
                                        handle_network_graph_update(network_graph, &event)
                                }
                                if let Some(ref scorer) = scorer {
-                                       if update_scorer(scorer, &event) {
+                                       use std::time::SystemTime;
+                                       let duration_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
+                                               .expect("Time should be sometime after 1970");
+                                       if update_scorer(scorer, &event, duration_since_epoch) {
                                                log_trace!(logger, "Persisting scorer after update");
                                                if let Err(e) = persister.persist_scorer(&scorer) {
                                                        log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
@@ -782,14 +847,23 @@ impl BackgroundProcessor {
                                }
                                event_handler.handle_event(event);
                        };
-                       define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
+                       define_run_body!(
+                               persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
                                channel_manager, channel_manager.process_pending_events(&event_handler),
-                               gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
+                               peer_manager,
+                               peer_manager.onion_message_handler().process_pending_events(&event_handler),
+                               gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
                                { Sleeper::from_two_futures(
                                        channel_manager.get_event_or_persistence_needed_future(),
                                        chain_monitor.get_update_future()
                                ).wait_timeout(Duration::from_millis(100)); },
-                               |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
+                               |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false,
+                               || {
+                                       use std::time::SystemTime;
+                                       Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
+                                               .expect("Time should be sometime after 1970"))
+                               },
+                       )
                });
                Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
        }
@@ -862,9 +936,9 @@ mod tests {
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::{ChannelMessageHandler, Init};
        use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
-       use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
-       use lightning::routing::router::{DefaultRouter, Path, RouteHop};
+       use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
        use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
+       use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
        use lightning::util::config::UserConfig;
        use lightning::util::ser::Writeable;
        use lightning::util::test_utils;
@@ -1071,12 +1145,12 @@ mod tests {
        impl ScoreLookUp for TestScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(
-                       &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
+                       &self, _candidate: &CandidateRouteHop, _usage: ChannelUsage, _score_params: &Self::ScoreParams
                ) -> u64 { unimplemented!(); }
        }
 
        impl ScoreUpdate for TestScorer {
-               fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
+               fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64, _: Duration) {
                        if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front().unwrap() {
                                        TestResult::PaymentFailure { path, short_channel_id } => {
@@ -1096,7 +1170,7 @@ mod tests {
                        }
                }
 
-               fn payment_path_successful(&mut self, actual_path: &Path) {
+               fn payment_path_successful(&mut self, actual_path: &Path, _: Duration) {
                        if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front().unwrap() {
                                        TestResult::PaymentFailure { path, .. } => {
@@ -1115,7 +1189,7 @@ mod tests {
                        }
                }
 
-               fn probe_failed(&mut self, actual_path: &Path, _: u64) {
+               fn probe_failed(&mut self, actual_path: &Path, _: u64, _: Duration) {
                        if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front().unwrap() {
                                        TestResult::PaymentFailure { path, .. } => {
@@ -1133,7 +1207,7 @@ mod tests {
                                }
                        }
                }
-               fn probe_successful(&mut self, actual_path: &Path) {
+               fn probe_successful(&mut self, actual_path: &Path, _: Duration) {
                        if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front().unwrap() {
                                        TestResult::PaymentFailure { path, .. } => {
@@ -1151,6 +1225,7 @@ mod tests {
                                }
                        }
                }
+               fn time_passed(&mut self, _: Duration) {}
        }
 
        #[cfg(c_bindings)]
@@ -1362,9 +1437,11 @@ mod tests {
 
        #[test]
        fn test_timer_tick_called() {
-               // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
-               // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
-               // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
+               // Test that:
+               // - `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
+               // - `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`,
+               // - `PeerManager::timer_tick_occurred` is called every `PING_TIMER`, and
+               // - `OnionMessageHandler::timer_tick_occurred` is called every `ONION_MESSAGE_HANDLER_TIMER`.
                let (_, nodes) = create_nodes(1, "test_timer_tick_called");
                let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
@@ -1375,9 +1452,11 @@ mod tests {
                        let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
                        let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
                        let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
-                       if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
-                               log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
-                               log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
+                       let desired_log_4 = "Calling OnionMessageHandler's timer_tick_occurred".to_string();
+                       if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
+                               log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
+                               log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() &&
+                               log_entries.get(&("lightning_background_processor", desired_log_4)).is_some() {
                                break
                        }
                }
@@ -1424,7 +1503,7 @@ mod tests {
                                        tokio::time::sleep(dur).await;
                                        false // Never exit
                                })
-                       }, false,
+                       }, false, || Some(Duration::ZERO),
                );
                match bp_future.await {
                        Ok(_) => panic!("Expected error persisting manager"),
@@ -1555,8 +1634,8 @@ mod tests {
 
                loop {
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
-                       let expected_log = "Persisting scorer".to_string();
-                       if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
+                       let expected_log = "Calling time_passed and persisting scorer".to_string();
+                       if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
                                break
                        }
                }
@@ -1580,7 +1659,7 @@ mod tests {
                                $sleep;
                                let log_entries = $nodes[0].logger.lines.lock().unwrap();
                                let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
-                               if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+                               if *log_entries.get(&("lightning_background_processor", loop_counter))
                                        .unwrap_or(&0) > 1
                                {
                                        // Wait until the loop has gone around at least twice.
@@ -1654,7 +1733,7 @@ mod tests {
                                                _ = exit_receiver.changed() => true,
                                        }
                                })
-                       }, false,
+                       }, false, || Some(Duration::from_secs(1696300000)),
                );
 
                let t1 = tokio::spawn(bp_future);
@@ -1792,7 +1871,7 @@ mod tests {
 
                let log_entries = nodes[0].logger.lines.lock().unwrap();
                let expected_log = "Persisting scorer after update".to_string();
-               assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+               assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
        }
 
        #[tokio::test]
@@ -1829,7 +1908,7 @@ mod tests {
                                                _ = exit_receiver.changed() => true,
                                        }
                                })
-                       }, false,
+                       }, false, || Some(Duration::ZERO),
                );
                let t1 = tokio::spawn(bp_future);
                let t2 = tokio::spawn(async move {
@@ -1838,7 +1917,7 @@ mod tests {
 
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
                        let expected_log = "Persisting scorer after update".to_string();
-                       assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+                       assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
                });
 
                let (r1, r2) = tokio::join!(t1, t2);
index 5a8f887c1d82b2b9acb9739bd9055b96653340f5..8275c263c1231604e320444ce165fe73c8817134 100644 (file)
@@ -1,13 +1,13 @@
 [package]
 name = "lightning-block-sync"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Jeffrey Czyz", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities to fetch the chain data from a block source and feed them into Rust Lightning.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -20,11 +20,11 @@ rpc-client = [ "serde_json", "chunked_transfer" ]
 [dependencies]
 bitcoin = "0.30.2"
 hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
-lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
+lightning = { version = "0.0.119", path = "../lightning" }
+tokio = { version = "1.35", features = [ "io-util", "net", "time", "rt" ], optional = true }
 serde_json = { version = "1.0", optional = true }
 chunked_transfer = { version = "1.4", optional = true }
 
 [dev-dependencies]
-lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
-tokio = { version = "1.14", features = [ "macros", "rt" ] }
+lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
+tokio = { version = "1.35", features = [ "macros", "rt" ] }
index 0f9ab8c43baadcc8cb72eec2375a063e9c4ffb37..ed811d2cc0c3f629e16a450937c58d8287f35689 100644 (file)
@@ -162,25 +162,8 @@ impl TryInto<(BlockHash, Option<u32>)> for JsonResponse {
 impl TryInto<Txid> for JsonResponse {
        type Error = std::io::Error;
        fn try_into(self) -> std::io::Result<Txid> {
-               match self.0.as_str() {
-                       None => Err(std::io::Error::new(
-                               std::io::ErrorKind::InvalidData,
-                               "expected JSON string",
-                       )),
-                       Some(hex_data) => match Vec::<u8>::from_hex(hex_data) {
-                               Err(_) => Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "invalid hex data",
-                               )),
-                               Ok(txid_data) => match encode::deserialize(&txid_data) {
-                                       Err(_) => Err(std::io::Error::new(
-                                               std::io::ErrorKind::InvalidData,
-                                               "invalid txid",
-                                       )),
-                                       Ok(txid) => Ok(txid),
-                               },
-                       },
-               }
+               let hex_data = self.0.as_str().ok_or(Self::Error::new(std::io::ErrorKind::InvalidData, "expected JSON string" ))?;
+               Txid::from_str(hex_data).map_err(|err|Self::Error::new(std::io::ErrorKind::InvalidData, err.to_string() ))
        }
 }
 
@@ -264,10 +247,12 @@ impl TryInto<BlockHash> for JsonResponse {
 /// The REST `getutxos` endpoint retuns a whole pile of data we don't care about and one bit we do
 /// - whether the `hit bitmap` field had any entries. Thus we condense the result down into only
 /// that.
+#[cfg(feature = "rest-client")]
 pub(crate) struct GetUtxosResponse {
        pub(crate) hit_bitmap_nonempty: bool
 }
 
+#[cfg(feature = "rest-client")]
 impl TryInto<GetUtxosResponse> for JsonResponse {
        type Error = std::io::Error;
 
@@ -622,7 +607,7 @@ pub(crate) mod tests {
                match TryInto::<Txid>::try_into(response) {
                        Err(e) => {
                                assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid hex data");
+                               assert_eq!(e.get_ref().unwrap().to_string(), "bad hex string length 6 (expected 64)");
                        }
                        Ok(_) => panic!("Expected error"),
                }
@@ -634,7 +619,7 @@ pub(crate) mod tests {
                match TryInto::<Txid>::try_into(response) {
                        Err(e) => {
                                assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid txid");
+                               assert_eq!(e.get_ref().unwrap().to_string(), "bad hex string length 4 (expected 64)");
                        }
                        Ok(_) => panic!("Expected error"),
                }
@@ -650,6 +635,20 @@ pub(crate) mod tests {
                }
        }
 
+       #[test]
+       fn into_txid_from_bitcoind_rpc_json_response() {
+               let mut rpc_response = serde_json::json!(
+            {"error": "", "id": "770", "result": "7934f775149929a8b742487129a7c3a535dfb612f0b726cc67bc10bc2628f906"}
+
+        );
+        let r: std::io::Result<Txid> = JsonResponse(rpc_response.get_mut("result").unwrap().take())
+            .try_into();
+        assert_eq!(
+            r.unwrap().to_string(),
+            "7934f775149929a8b742487129a7c3a535dfb612f0b726cc67bc10bc2628f906"
+        );
+       }
+
        // TryInto<Transaction> can be used in two ways, first with plain hex response where data is
        // the hex encoded transaction (e.g. as a result of getrawtransaction) or as a JSON object
        // where the hex encoded transaction can be found in the hex field of the object (if present)
index 3b6e9f68376c9f00fa6427552bb87e7b6653cb16..591b0298793ffb3baadb1d0ee0009b1199a95941 100644 (file)
@@ -9,10 +9,7 @@ use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::blockdata::transaction::{TxOut, OutPoint};
 use bitcoin::hash_types::BlockHash;
 
-use lightning::sign::NodeSigner;
-
-use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
-use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler};
+use lightning::ln::peer_handler::APeerManager;
 
 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoResult, UtxoLookupError};
@@ -135,21 +132,14 @@ impl<
 pub struct GossipVerifier<S: FutureSpawner,
        Blocks: Deref + Send + Sync + 'static + Clone,
        L: Deref + Send + Sync + 'static,
-       Descriptor: SocketDescriptor + Send + Sync + 'static,
-       CM: Deref + Send + Sync + 'static,
-       OM: Deref + Send + Sync + 'static,
-       CMH: Deref + Send + Sync + 'static,
-       NS: Deref + Send + Sync + 'static,
+       APM: Deref + Send + Sync + 'static + Clone,
 > where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       CM::Target: ChannelMessageHandler,
-       OM::Target: OnionMessageHandler,
-       CMH::Target: CustomMessageHandler,
-       NS::Target: NodeSigner,
+       APM::Target: APeerManager,
 {
        source: Blocks,
-       peer_manager: Arc<PeerManager<Descriptor, CM, Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, OM, L, CMH, NS>>,
+       peer_manager: APM,
        gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>,
        spawn: S,
        block_cache: Arc<Mutex<VecDeque<(u32, Block)>>>,
@@ -160,24 +150,17 @@ const BLOCK_CACHE_SIZE: usize = 5;
 impl<S: FutureSpawner,
        Blocks: Deref + Send + Sync + Clone,
        L: Deref + Send + Sync,
-       Descriptor: SocketDescriptor + Send + Sync,
-       CM: Deref + Send + Sync,
-       OM: Deref + Send + Sync,
-       CMH: Deref + Send + Sync,
-       NS: Deref + Send + Sync,
-> GossipVerifier<S, Blocks, L, Descriptor, CM, OM, CMH, NS> where
+       APM: Deref + Send + Sync + Clone,
+> GossipVerifier<S, Blocks, L, APM> where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       CM::Target: ChannelMessageHandler,
-       OM::Target: OnionMessageHandler,
-       CMH::Target: CustomMessageHandler,
-       NS::Target: NodeSigner,
+       APM::Target: APeerManager,
 {
        /// Constructs a new [`GossipVerifier`].
        ///
        /// This is expected to be given to a [`P2PGossipSync`] (initially constructed with `None` for
        /// the UTXO lookup) via [`P2PGossipSync::add_utxo_lookup`].
-       pub fn new(source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, peer_manager: Arc<PeerManager<Descriptor, CM, Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, OM, L, CMH, NS>>) -> Self {
+       pub fn new(source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, peer_manager: APM) -> Self {
                Self {
                        source, spawn, gossiper, peer_manager,
                        block_cache: Arc::new(Mutex::new(VecDeque::with_capacity(BLOCK_CACHE_SIZE))),
@@ -269,18 +252,11 @@ impl<S: FutureSpawner,
 impl<S: FutureSpawner,
        Blocks: Deref + Send + Sync + Clone,
        L: Deref + Send + Sync,
-       Descriptor: SocketDescriptor + Send + Sync,
-       CM: Deref + Send + Sync,
-       OM: Deref + Send + Sync,
-       CMH: Deref + Send + Sync,
-       NS: Deref + Send + Sync,
-> Deref for GossipVerifier<S, Blocks, L, Descriptor, CM, OM, CMH, NS> where
+       APM: Deref + Send + Sync + Clone,
+> Deref for GossipVerifier<S, Blocks, L, APM> where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       CM::Target: ChannelMessageHandler,
-       OM::Target: OnionMessageHandler,
-       CMH::Target: CustomMessageHandler,
-       NS::Target: NodeSigner,
+       APM::Target: APeerManager,
 {
        type Target = Self;
        fn deref(&self) -> &Self { self }
@@ -290,18 +266,11 @@ impl<S: FutureSpawner,
 impl<S: FutureSpawner,
        Blocks: Deref + Send + Sync + Clone,
        L: Deref + Send + Sync,
-       Descriptor: SocketDescriptor + Send + Sync,
-       CM: Deref + Send + Sync,
-       OM: Deref + Send + Sync,
-       CMH: Deref + Send + Sync,
-       NS: Deref + Send + Sync,
-> UtxoLookup for GossipVerifier<S, Blocks, L, Descriptor, CM, OM, CMH, NS> where
+       APM: Deref + Send + Sync + Clone,
+> UtxoLookup for GossipVerifier<S, Blocks, L, APM> where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       CM::Target: ChannelMessageHandler,
-       OM::Target: OnionMessageHandler,
-       CMH::Target: CustomMessageHandler,
-       NS::Target: NodeSigner,
+       APM::Target: APeerManager,
 {
        fn get_utxo(&self, _chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult {
                let res = UtxoFuture::new();
@@ -309,11 +278,11 @@ impl<S: FutureSpawner,
                let source = self.source.clone();
                let gossiper = Arc::clone(&self.gossiper);
                let block_cache = Arc::clone(&self.block_cache);
-               let pm = Arc::clone(&self.peer_manager);
+               let pm = self.peer_manager.clone();
                self.spawn.spawn(async move {
                        let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await;
                        fut.resolve(gossiper.network_graph(), &*gossiper, res);
-                       pm.process_events();
+                       pm.as_ref().process_events();
                });
                UtxoResult::Async(res)
        }
index df113fb012ae7d22556b18eaaeefe75e379b2c60..8cb0ff70a2e67ee1dee3330ffa77532b8bd68b83 100644 (file)
@@ -69,10 +69,10 @@ BlockSourceResult<ValidatedBlockHeader> where B::Target: BlockSource {
 ///    R: Router,
 ///    L: Logger,
 ///    C: chain::Filter,
-///    P: chainmonitor::Persist<SP::Signer>,
+///    P: chainmonitor::Persist<SP::EcdsaSigner>,
 /// >(
 ///    block_source: &B,
-///    chain_monitor: &ChainMonitor<SP::Signer, &C, &T, &F, &L, &P>,
+///    chain_monitor: &ChainMonitor<SP::EcdsaSigner, &C, &T, &F, &L, &P>,
 ///    config: UserConfig,
 ///    entropy_source: &ES,
 ///    node_signer: &NS,
@@ -85,7 +85,7 @@ BlockSourceResult<ValidatedBlockHeader> where B::Target: BlockSource {
 /// ) {
 ///    // Read a serialized channel monitor paired with the block hash when it was persisted.
 ///    let serialized_monitor = "...";
-///    let (monitor_block_hash, mut monitor) = <(BlockHash, ChannelMonitor<SP::Signer>)>::read(
+///    let (monitor_block_hash, mut monitor) = <(BlockHash, ChannelMonitor<SP::EcdsaSigner>)>::read(
 ///            &mut Cursor::new(&serialized_monitor), (entropy_source, signer_provider)).unwrap();
 ///
 ///    // Read the channel manager paired with the block hash when it was persisted.
@@ -103,7 +103,7 @@ BlockSourceResult<ValidatedBlockHeader> where B::Target: BlockSource {
 ///                    config,
 ///                    vec![&mut monitor],
 ///            );
-///            <(BlockHash, ChannelManager<&ChainMonitor<SP::Signer, &C, &T, &F, &L, &P>, &T, &ES, &NS, &SP, &F, &R, &L>)>::read(
+///            <(BlockHash, ChannelManager<&ChainMonitor<SP::EcdsaSigner, &C, &T, &F, &L, &P>, &T, &ES, &NS, &SP, &F, &R, &L>)>::read(
 ///                    &mut Cursor::new(&serialized_manager), read_args).unwrap()
 ///    };
 ///
index 77ff3f0810b6f4a3bd6addd98a11faa383660b9d..4a01d4673b31e91d56c3cb350d995c1b7a3d7403 100644 (file)
@@ -13,9 +13,8 @@
 //! Both features support either blocking I/O using `std::net::TcpStream` or, with feature `tokio`,
 //! non-blocking I/O using `tokio::net::TcpStream` from inside a Tokio runtime.
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
index 182ff498512c77515d561376cc04247e876c490e..79331a3f0e06a67452818ece6465a20d57610b79 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-custom-message"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Jeffrey Czyz"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -15,4 +15,4 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.30.2"
-lightning = { version = "0.0.118", path = "../lightning" }
+lightning = { version = "0.0.119", path = "../lightning" }
index 0b45fa41e6cbcdede6a5bea5c71446a5f0d0f239..4b29f21e0f55b01551b3472faf7f36314ee5b99b 100644 (file)
@@ -1,14 +1,14 @@
 [package]
 name = "lightning-invoice"
 description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.26.0"
+version = "0.27.0"
 authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
 documentation = "https://docs.rs/lightning-invoice/"
 license = "MIT OR Apache-2.0"
 keywords = [ "lightning", "bitcoin", "invoice", "BOLT11" ]
 readme = "README.md"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -17,19 +17,18 @@ rustdoc-args = ["--cfg", "docsrs"]
 [features]
 default = ["std"]
 no-std = ["hashbrown", "lightning/no-std"]
-std = ["bitcoin_hashes/std", "num-traits/std", "lightning/std", "bech32/std"]
+std = ["bitcoin/std", "num-traits/std", "lightning/std", "bech32/std"]
 
 [dependencies]
 bech32 = { version = "0.9.0", default-features = false }
-lightning = { version = "0.0.118", path = "../lightning", default-features = false }
+lightning = { version = "0.0.119", path = "../lightning", default-features = false }
 secp256k1 = { version = "0.27.0", default-features = false, features = ["recovery", "alloc"] }
 num-traits = { version = "0.2.8", default-features = false }
-bitcoin_hashes = { version = "0.12.0", default-features = false }
 hashbrown = { version = "0.8", optional = true }
 serde = { version = "1.0.118", optional = true }
 bitcoin = { version = "0.30.2", default-features = false }
 
 [dev-dependencies]
-lightning = { version = "0.0.118", path = "../lightning", default-features = false, features = ["_test_utils"] }
+lightning = { version = "0.0.119", path = "../lightning", default-features = false, features = ["_test_utils"] }
 hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
 serde_json = { version = "1"}
index 0d5fbb365ba3063fefa0b189bb858e7b7c843091..746fe63ba03d0d38e983212fa01f37a4a6d07551 100644 (file)
@@ -3,7 +3,7 @@ name = "lightning-invoice-fuzz"
 version = "0.0.1"
 authors = ["Automatically generated"]
 publish = false
-edition = "2018"
+edition = "2021"
 
 [package.metadata]
 cargo-fuzz = true
index 406f96764e9971ad97abc4f7dbdfb352e94cad35..871c6c7d755ee4dad7656f7649a0681ada25f3c5 100644 (file)
@@ -48,9 +48,9 @@ mod tests {
         for (idx, c) in hex.as_bytes().iter().filter(|&&c| c != b'\n').enumerate() {
             b <<= 4;
             match *c {
-                b'A'...b'F' => b |= c - b'A' + 10,
-                b'a'...b'f' => b |= c - b'a' + 10,
-                b'0'...b'9' => b |= c - b'0',
+                b'A'..=b'F' => b |= c - b'A' + 10,
+                b'a'..=b'f' => b |= c - b'a' + 10,
+                b'0'..=b'9' => b |= c - b'0',
                 _ => panic!("Bad hex"),
             }
             if (idx & 1) == 1 {
index 0276b742f28405b21c2099adeb4333654784c231..c75373ea9dac3d127575b95d78954400164d1dc9 100644 (file)
@@ -11,8 +11,8 @@ use bech32::{u5, FromBase32};
 
 use bitcoin::{PubkeyHash, ScriptHash};
 use bitcoin::address::WitnessVersion;
-use bitcoin_hashes::Hash;
-use bitcoin_hashes::sha256;
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::sha256;
 use crate::prelude::*;
 use lightning::ln::PaymentSecret;
 use lightning::routing::gossip::RoutingFees;
@@ -564,14 +564,14 @@ impl FromBase32 for Fallback {
                        17 => {
                                let pkh = match PubkeyHash::from_slice(&bytes) {
                                        Ok(pkh) => pkh,
-                                       Err(bitcoin_hashes::Error::InvalidLength(_, _)) => return Err(Bolt11ParseError::InvalidPubKeyHashLength),
+                                       Err(bitcoin::hashes::Error::InvalidLength(_, _)) => return Err(Bolt11ParseError::InvalidPubKeyHashLength),
                                };
                                Ok(Fallback::PubKeyHash(pkh))
                        }
                        18 => {
                                let sh = match ScriptHash::from_slice(&bytes) {
                                        Ok(sh) => sh,
-                                       Err(bitcoin_hashes::Error::InvalidLength(_, _)) => return Err(Bolt11ParseError::InvalidScriptHashLength),
+                                       Err(bitcoin::hashes::Error::InvalidLength(_, _)) => return Err(Bolt11ParseError::InvalidScriptHashLength),
                                };
                                Ok(Fallback::ScriptHash(sh))
                        }
@@ -726,7 +726,7 @@ mod test {
        use crate::de::Bolt11ParseError;
        use secp256k1::PublicKey;
        use bech32::u5;
-       use bitcoin_hashes::sha256;
+       use bitcoin::hashes::sha256;
        use std::str::FromStr;
 
        const CHARSET_REV: [i8; 128] = [
@@ -856,7 +856,7 @@ mod test {
                use bech32::FromBase32;
                use bitcoin::{PubkeyHash, ScriptHash};
                use bitcoin::address::WitnessVersion;
-               use bitcoin_hashes::Hash;
+               use bitcoin::hashes::Hash;
 
                let cases = vec![
                        (
index d87c6c89372080b5696fd754f35fb8eeddf1c75d..df0412bfc5d3e0d3876b177200527e01fd272678 100644 (file)
@@ -1,6 +1,5 @@
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(non_upper_case_globals)]
@@ -31,7 +30,6 @@ pub mod payment;
 pub mod utils;
 
 extern crate bech32;
-extern crate bitcoin_hashes;
 #[macro_use] extern crate lightning;
 extern crate num_traits;
 extern crate secp256k1;
@@ -47,7 +45,7 @@ use std::time::SystemTime;
 use bech32::u5;
 use bitcoin::{Address, Network, PubkeyHash, ScriptHash};
 use bitcoin::address::{Payload, WitnessProgram, WitnessVersion};
-use bitcoin_hashes::{Hash, sha256};
+use bitcoin::hashes::{Hash, sha256};
 use lightning::ln::features::Bolt11InvoiceFeatures;
 use lightning::util::invoice::construct_invoice_preimage;
 
@@ -79,6 +77,7 @@ mod de;
 mod ser;
 mod tb;
 
+#[allow(unused_imports)]
 mod prelude {
        #[cfg(feature = "hashbrown")]
        extern crate hashbrown;
@@ -94,10 +93,6 @@ mod prelude {
 
 use crate::prelude::*;
 
-/// Sync compat for std/no_std
-#[cfg(not(feature = "std"))]
-mod sync;
-
 /// Errors that indicate what is wrong with the invoice. They have some granularity for debug
 /// reasons, but should generally result in an "invalid BOLT11 invoice" message for the user.
 #[allow(missing_docs)]
@@ -167,10 +162,10 @@ pub const DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA: u64 = 18;
 /// extern crate secp256k1;
 /// extern crate lightning;
 /// extern crate lightning_invoice;
-/// extern crate bitcoin_hashes;
+/// extern crate bitcoin;
 ///
-/// use bitcoin_hashes::Hash;
-/// use bitcoin_hashes::sha256;
+/// use bitcoin::hashes::Hash;
+/// use bitcoin::hashes::sha256;
 ///
 /// use secp256k1::Secp256k1;
 /// use secp256k1::SecretKey;
@@ -528,7 +523,7 @@ impl Ord for Bolt11InvoiceSignature {
 /// The encoded route has to be <1024 5bit characters long (<=639 bytes or <=12 hops)
 ///
 #[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
-pub struct PrivateRoute(pub RouteHint);
+pub struct PrivateRoute(RouteHint);
 
 /// Tag constants as specified in BOLT11
 #[allow(missing_docs)]
@@ -1757,7 +1752,7 @@ impl<'de> Deserialize<'de> for Bolt11Invoice {
 #[cfg(test)]
 mod test {
        use bitcoin::ScriptBuf;
-       use bitcoin_hashes::sha256;
+       use bitcoin::hashes::sha256;
        use std::str::FromStr;
 
        #[test]
@@ -2051,7 +2046,7 @@ mod test {
                use lightning::routing::router::RouteHintHop;
                use secp256k1::Secp256k1;
                use secp256k1::{SecretKey, PublicKey};
-               use std::time::{UNIX_EPOCH, Duration};
+               use std::time::Duration;
 
                let secp_ctx = Secp256k1::new();
 
@@ -2140,7 +2135,7 @@ mod test {
                assert_eq!(invoice.currency(), Currency::BitcoinTestnet);
                #[cfg(feature = "std")]
                assert_eq!(
-                       invoice.timestamp().duration_since(UNIX_EPOCH).unwrap().as_secs(),
+                       invoice.timestamp().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(),
                        1234567
                );
                assert_eq!(invoice.payee_pub_key(), Some(&public_key));
index 7306d12f5e6c0061825ebf1d2fde3c2decafde79..8196fa9eb89a81d7df74300484ee96cddee5588f 100644 (file)
@@ -10,7 +10,7 @@
 //! Convenient utilities for paying Lightning invoices.
 
 use crate::Bolt11Invoice;
-use crate::bitcoin_hashes::Hash;
+use bitcoin::hashes::Hash;
 
 use lightning::ln::PaymentHash;
 use lightning::ln::channelmanager::RecipientOnionFields;
@@ -84,15 +84,13 @@ fn params_from_invoice(invoice: &Bolt11Invoice, amount_msat: u64)
 mod tests {
        use super::*;
        use crate::{InvoiceBuilder, Currency};
-       use bitcoin_hashes::sha256::Hash as Sha256;
-       use lightning::events::Event;
-       use lightning::ln::channelmanager::{Retry, PaymentId};
-       use lightning::ln::msgs::ChannelMessageHandler;
+       use bitcoin::hashes::sha256::Hash as Sha256;
        use lightning::ln::PaymentSecret;
-       use lightning::ln::functional_test_utils::*;
        use lightning::routing::router::Payee;
        use secp256k1::{SecretKey, PublicKey, Secp256k1};
-       use std::time::{SystemTime, Duration};
+       use core::time::Duration;
+       #[cfg(feature = "std")]
+       use std::time::SystemTime;
 
        fn duration_since_epoch() -> Duration {
                #[cfg(feature = "std")]
@@ -171,6 +169,10 @@ mod tests {
        #[test]
        #[cfg(feature = "std")]
        fn payment_metadata_end_to_end() {
+               use lightning::events::Event;
+               use lightning::ln::channelmanager::{Retry, PaymentId};
+               use lightning::ln::msgs::ChannelMessageHandler;
+               use lightning::ln::functional_test_utils::*;
                // Test that a payment metadata read from an invoice passed to `pay_invoice` makes it all
                // the way out through the `PaymentClaimable` event.
                let chanmon_cfgs = create_chanmon_cfgs(2);
diff --git a/lightning-invoice/src/sync.rs b/lightning-invoice/src/sync.rs
deleted file mode 100644 (file)
index fae923f..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-use core::cell::{RefCell, RefMut};
-use core::ops::{Deref, DerefMut};
-
-pub type LockResult<Guard> = Result<Guard, ()>;
-
-pub struct Mutex<T: ?Sized> {
-       inner: RefCell<T>
-}
-
-#[must_use = "if unused the Mutex will immediately unlock"]
-pub struct MutexGuard<'a, T: ?Sized + 'a> {
-       lock: RefMut<'a, T>,
-}
-
-impl<T: ?Sized> Deref for MutexGuard<'_, T> {
-       type Target = T;
-
-       fn deref(&self) -> &T {
-               &self.lock.deref()
-       }
-}
-
-impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
-       fn deref_mut(&mut self) -> &mut T {
-               self.lock.deref_mut()
-       }
-}
-
-impl<T> Mutex<T> {
-       pub fn new(inner: T) -> Mutex<T> {
-               Mutex { inner: RefCell::new(inner) }
-       }
-
-       pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
-               Ok(MutexGuard { lock: self.inner.borrow_mut() })
-       }
-}
index 9930b545662ae4598115f548b983c24860c3337e..5e8b72467e5da655cd9f77876ac692b948498833 100644 (file)
@@ -4,7 +4,7 @@ use crate::{Bolt11Invoice, CreationError, Currency, InvoiceBuilder, SignOrCreati
 
 use crate::{prelude::*, Description, Bolt11InvoiceDescription, Sha256};
 use bech32::ToBase32;
-use bitcoin_hashes::Hash;
+use bitcoin::hashes::Hash;
 use lightning::chain;
 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use lightning::sign::{Recipient, NodeSigner, SignerProvider, EntropySource};
@@ -14,7 +14,7 @@ use lightning::ln::channelmanager::{PhantomRouteHints, MIN_CLTV_EXPIRY_DELTA};
 use lightning::ln::inbound_payment::{create, create_from_hash, ExpandedKey};
 use lightning::routing::gossip::RoutingFees;
 use lightning::routing::router::{RouteHint, RouteHintHop, Router};
-use lightning::util::logger::Logger;
+use lightning::util::logger::{Logger, Record};
 use secp256k1::PublicKey;
 use core::ops::Deref;
 use core::time::Duration;
@@ -335,7 +335,7 @@ pub fn create_invoice_from_channelmanager<M: Deref, T: Deref, ES: Deref, NS: Der
        min_final_cltv_expiry_delta: Option<u16>,
 ) -> Result<Bolt11Invoice, SignOrCreationError<()>>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -376,7 +376,7 @@ pub fn create_invoice_from_channelmanager_with_description_hash<M: Deref, T: Der
        invoice_expiry_delta_secs: u32, min_final_cltv_expiry_delta: Option<u16>,
 ) -> Result<Bolt11Invoice, SignOrCreationError<()>>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -406,7 +406,7 @@ pub fn create_invoice_from_channelmanager_with_description_hash_and_duration_sin
        duration_since_epoch: Duration, invoice_expiry_delta_secs: u32, min_final_cltv_expiry_delta: Option<u16>,
 ) -> Result<Bolt11Invoice, SignOrCreationError<()>>
                where
-                       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+                       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                        T::Target: BroadcasterInterface,
                        ES::Target: EntropySource,
                        NS::Target: NodeSigner,
@@ -431,7 +431,7 @@ pub fn create_invoice_from_channelmanager_and_duration_since_epoch<M: Deref, T:
        invoice_expiry_delta_secs: u32, min_final_cltv_expiry_delta: Option<u16>,
 ) -> Result<Bolt11Invoice, SignOrCreationError<()>>
                where
-                       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+                       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                        T::Target: BroadcasterInterface,
                        ES::Target: EntropySource,
                        NS::Target: NodeSigner,
@@ -455,7 +455,7 @@ fn _create_invoice_from_channelmanager_and_duration_since_epoch<M: Deref, T: Der
        duration_since_epoch: Duration, invoice_expiry_delta_secs: u32, min_final_cltv_expiry_delta: Option<u16>,
 ) -> Result<Bolt11Invoice, SignOrCreationError<()>>
                where
-                       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+                       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                        T::Target: BroadcasterInterface,
                        ES::Target: EntropySource,
                        NS::Target: NodeSigner,
@@ -488,7 +488,7 @@ pub fn create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_
        invoice_expiry_delta_secs: u32, payment_hash: PaymentHash, min_final_cltv_expiry_delta: Option<u16>,
 ) -> Result<Bolt11Invoice, SignOrCreationError<()>>
        where
-               M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+               M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: BroadcasterInterface,
                ES::Target: EntropySource,
                NS::Target: NodeSigner,
@@ -518,7 +518,7 @@ fn _create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_has
        payment_secret: PaymentSecret, min_final_cltv_expiry_delta: Option<u16>,
 ) -> Result<Bolt11Invoice, SignOrCreationError<()>>
        where
-               M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+               M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: BroadcasterInterface,
                ES::Target: EntropySource,
                NS::Target: NodeSigner,
@@ -626,6 +626,7 @@ where
 
        log_trace!(logger, "Considering {} channels for invoice route hints", channels.len());
        for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) {
+               let logger = WithChannelDetails::from(logger, &channel);
                if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() {
                        log_trace!(logger, "Ignoring channel {} for invoice route hints", &channel.channel_id);
                        continue;
@@ -710,6 +711,7 @@ where
                .into_iter()
                .map(|(_, channel)| channel)
                .filter(|channel| {
+                       let logger = WithChannelDetails::from(logger, &channel);
                        let has_enough_capacity = channel.inbound_capacity_msat >= min_inbound_capacity;
                        let include_channel = if has_pub_unconf_chan {
                                // If we have a public channel, but it doesn't have enough confirmations to (yet)
@@ -790,16 +792,39 @@ fn prefer_current_channel(min_inbound_capacity_msat: Option<u64>, current_channe
        current_channel > candidate_channel
 }
 
+/// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`].
+struct WithChannelDetails<'a, 'b, L: Deref> where L::Target: Logger {
+       /// The logger to delegate to after adding context to the record.
+       logger: &'a L,
+       /// The [`ChannelDetails`] for adding relevant context to the logged record.
+       details: &'b ChannelDetails
+}
+
+impl<'a, 'b, L: Deref> Logger for WithChannelDetails<'a, 'b, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               record.peer_id = Some(self.details.counterparty.node_id);
+               record.channel_id = Some(self.details.channel_id);
+               self.logger.log(record)
+       }
+}
+
+impl<'a, 'b, L: Deref> WithChannelDetails<'a, 'b, L> where L::Target: Logger {
+       fn from(logger: &'a L, details: &'b ChannelDetails) -> Self {
+               Self { logger, details }
+       }
+}
+
 #[cfg(test)]
 mod test {
-       use core::cell::RefCell;
        use core::time::Duration;
        use crate::{Currency, Description, Bolt11InvoiceDescription, SignOrCreationError, CreationError};
-       use bitcoin_hashes::{Hash, sha256};
-       use bitcoin_hashes::sha256::Hash as Sha256;
+       use bitcoin::hashes::{Hash, sha256};
+       use bitcoin::hashes::sha256::Hash as Sha256;
        use lightning::sign::PhantomKeysManager;
-       use lightning::events::{MessageSendEvent, MessageSendEventsProvider, Event, EventsProvider};
-       use lightning::ln::{PaymentPreimage, PaymentHash};
+       use lightning::events::{MessageSendEvent, MessageSendEventsProvider};
+       use lightning::ln::PaymentHash;
+       #[cfg(feature = "std")]
+       use lightning::ln::PaymentPreimage;
        use lightning::ln::channelmanager::{PhantomRouteHints, MIN_FINAL_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields, Retry};
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::ChannelMessageHandler;
@@ -1270,6 +1295,9 @@ mod test {
 
        #[cfg(feature = "std")]
        fn do_test_multi_node_receive(user_generated_pmt_hash: bool) {
+               use lightning::events::{Event, EventsProvider};
+               use core::cell::RefCell;
+
                let mut chanmon_cfgs = create_chanmon_cfgs(3);
                let seed_1 = [42u8; 32];
                let seed_2 = [43u8; 32];
index 98886bef78809aeb7bd9ef9e1784e72e3cf96e4a..92bc87bef63e86e98c7678b88dc2ce251352c874 100644 (file)
@@ -1,5 +1,4 @@
 extern crate bech32;
-extern crate bitcoin_hashes;
 extern crate lightning;
 extern crate lightning_invoice;
 extern crate secp256k1;
@@ -8,7 +7,7 @@ extern crate hex;
 use bitcoin::address::WitnessVersion;
 use bitcoin::{PubkeyHash, ScriptHash};
 use bitcoin::hashes::hex::FromHex;
-use bitcoin_hashes::{sha256, Hash};
+use bitcoin::hashes::{sha256, Hash};
 use lightning::ln::PaymentSecret;
 use lightning::routing::gossip::RoutingFees;
 use lightning::routing::router::{RouteHint, RouteHintHop};
index 247481fcfc853e5da507cd91b2cb96e0c016fd5d..02ac6a67485dbc48fa375e0b9db252da83a5c620 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-net-tokio"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
@@ -8,7 +8,7 @@ description = """
 Implementation of the rust-lightning network stack using Tokio.
 For Rust-Lightning clients which wish to make direct connections to Lightning P2P nodes, this is a simple alternative to implementing the required network stack, especially for those already using Tokio.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -16,9 +16,9 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.30.2"
-lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
+lightning = { version = "0.0.119", path = "../lightning" }
+tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
-lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
+tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
index d4f75dd6cd8b073d0e29de1ecf76685b8122ab6e..1aa2cc25a13cc0772ee2e958da66aca602ce0eb4 100644 (file)
@@ -22,9 +22,8 @@
 //!
 //! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
@@ -571,7 +570,7 @@ mod tests {
 
        pub struct TestLogger();
        impl lightning::util::logger::Logger for TestLogger {
-               fn log(&self, record: &lightning::util::logger::Record) {
+               fn log(&self, record: lightning::util::logger::Record) {
                        println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
                }
        }
index 387366bff0e9f4826af513633f126d42b1e9253c..c44444c586f094a7ff951993ea13a56c6ef5354c 100644 (file)
@@ -1,13 +1,13 @@
 [package]
 name = "lightning-persister"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Valentine Wallace", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities for LDK data persistence and retrieval.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -15,7 +15,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.30.2"
-lightning = { version = "0.0.118", path = "../lightning" }
+lightning = { version = "0.0.119", path = "../lightning" }
 
 [target.'cfg(windows)'.dependencies]
 windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
@@ -24,5 +24,5 @@ windows-sys = { version = "0.48.0", default-features = false, features = ["Win32
 criterion = { version = "0.4", optional = true, default-features = false }
 
 [dev-dependencies]
-lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
 bitcoin = { version = "0.30.2", default-features = false }
index 118cf9af7ba0e9554ca23514534007ca8c824e0a..b5c6526207df007f10f33df3deac57629015323e 100644 (file)
@@ -381,11 +381,6 @@ mod tests {
        use lightning::util::persist::read_channel_monitors;
        use std::fs;
        use std::str::FromStr;
-       #[cfg(target_os = "windows")]
-       use {
-               lightning::get_event_msg,
-               lightning::ln::msgs::ChannelMessageHandler,
-       };
 
        impl Drop for FilesystemStore {
                fn drop(&mut self) {
index ae258e137d742f32ce1067d6508ad133e02bd67a..8e7d9055a6aae014b80daf6d9280717fda1a713f 100644 (file)
@@ -1,8 +1,7 @@
 //! Provides utilities for LDK data persistence and retrieval.
-//
-// TODO: Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 
index 2018e3b24836094684c9b5f4848b6f28c808abee..28a2ddedfdc4cb30eacf72e154e6f7204481ba73 100644 (file)
@@ -1,10 +1,10 @@
 [package]
 name = "lightning-rapid-gossip-sync"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Arik Sosman <git@arik.io>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
-edition = "2018"
+edition = "2021"
 description = """
 Utility to process gossip routing data from Rapid Gossip Sync Server.
 """
@@ -15,11 +15,11 @@ no-std = ["lightning/no-std"]
 std = ["lightning/std"]
 
 [dependencies]
-lightning = { version = "0.0.118", path = "../lightning", default-features = false }
+lightning = { version = "0.0.119", path = "../lightning", default-features = false }
 bitcoin = { version = "0.30.2", default-features = false }
 
 [target.'cfg(ldk_bench)'.dependencies]
 criterion = { version = "0.4", optional = true, default-features = false }
 
 [dev-dependencies]
-lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
index 5a61be7990e2a17270b7c61deeb8944952255735..0561975f82151e0deae7782c4893b1056da4b0d5 100644 (file)
@@ -1,6 +1,5 @@
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
@@ -49,7 +48,7 @@
 //! # use lightning::util::logger::{Logger, Record};
 //! # struct FakeLogger {}
 //! # impl Logger for FakeLogger {
-//! #     fn log(&self, record: &Record) { }
+//! #     fn log(&self, record: Record) { }
 //! # }
 //! # let logger = FakeLogger {};
 //!
index 20e03ce6c2767721295e139bef579ff5e1fa63be..a2630400fb33bb690dea8941495ce5493ed7a8d3 100644 (file)
@@ -1,20 +1,21 @@
 [package]
 name = "lightning-transaction-sync"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Elias Rohrer"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities for syncing LDK via the transaction-based `Confirm` interface.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [features]
-default = []
+default = ["time"]
+time = []
 esplora-async = ["async-interface", "esplora-client/async", "futures"]
 esplora-async-https = ["esplora-async", "esplora-client/async-https-rustls"]
 esplora-blocking = ["esplora-client/blocking"]
@@ -22,7 +23,7 @@ electrum = ["electrum-client"]
 async-interface = []
 
 [dependencies]
-lightning = { version = "0.0.118", path = "../lightning", default-features = false }
+lightning = { version = "0.0.119", path = "../lightning", default-features = false, features = ["std"] }
 bitcoin = { version = "0.30.2", default-features = false }
 bdk-macros = "0.6"
 futures = { version = "0.3", optional = true }
@@ -30,6 +31,11 @@ esplora-client = { version = "0.6", default-features = false, optional = true }
 electrum-client = { version = "0.18.0", optional = true }
 
 [dev-dependencies]
-lightning = { version = "0.0.118", path = "../lightning", features = ["std", "_test_utils"] }
-electrsd = { version = "0.26.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
-tokio = { version = "1.14.0", features = ["full"] }
+lightning = { version = "0.0.119", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
+tokio = { version = "1.35.0", features = ["full"] }
+
+[target.'cfg(not(no_download))'.dev-dependencies]
+electrsd = { version = "0.26.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
+
+[target.'cfg(no_download)'.dev-dependencies]
+electrsd = { version = "0.26.0", default-features = false, features = ["legacy"] }
index 07e11338905370d9385dc7d5259053b624055ea4..d0c8afef77e3161ec191f41a0ceae4aa5eb8efe0 100644 (file)
@@ -86,6 +86,7 @@ where
                let mut sync_state = self.sync_state.lock().unwrap();
 
                log_trace!(self.logger, "Starting transaction sync.");
+               #[cfg(feature = "time")]
                let start_time = Instant::now();
                let mut num_confirmed = 0;
                let mut num_unconfirmed = 0;
@@ -210,10 +211,15 @@ where
                                sync_state.pending_sync = false;
                        }
                }
+               #[cfg(feature = "time")]
                log_debug!(self.logger,
                        "Finished transaction sync at tip {} in {}ms: {} confirmed, {} unconfirmed.",
                        tip_header.block_hash(), start_time.elapsed().as_millis(), num_confirmed,
                        num_unconfirmed);
+               #[cfg(not(feature = "time"))]
+               log_debug!(self.logger,
+                       "Finished transaction sync at tip {}: {} confirmed, {} unconfirmed.",
+                       tip_header.block_hash(), num_confirmed, num_unconfirmed);
                Ok(())
        }
 
index 953f8b0718c3526cff701142c17a0a5d7e32e339..eb52faf33648cfb173985b13c29ecb9d754fdb87 100644 (file)
@@ -14,7 +14,6 @@ use esplora_client::r#async::AsyncClient;
 #[cfg(not(feature = "async-interface"))]
 use esplora_client::blocking::BlockingClient;
 
-use std::time::Instant;
 use std::collections::HashSet;
 use core::ops::Deref;
 
@@ -91,7 +90,8 @@ where
                let mut sync_state = self.sync_state.lock().await;
 
                log_trace!(self.logger, "Starting transaction sync.");
-               let start_time = Instant::now();
+               #[cfg(feature = "time")]
+               let start_time = std::time::Instant::now();
                let mut num_confirmed = 0;
                let mut num_unconfirmed = 0;
 
@@ -227,8 +227,12 @@ where
                                sync_state.pending_sync = false;
                        }
                }
+               #[cfg(feature = "time")]
                log_debug!(self.logger, "Finished transaction sync at tip {} in {}ms: {} confirmed, {} unconfirmed.",
                                tip_hash, start_time.elapsed().as_millis(), num_confirmed, num_unconfirmed);
+               #[cfg(not(feature = "time"))]
+               log_debug!(self.logger, "Finished transaction sync at tip {}: {} confirmed, {} unconfirmed.",
+                               tip_hash, num_confirmed, num_unconfirmed);
                Ok(())
        }
 
index 21b6a4e97c15d0e3261251db829dc1c38e7c9c92..7bd4b4aee3f0784d5ad5aa92c7638e49b08790aa 100644 (file)
@@ -58,9 +58,8 @@
 //! [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor
 //! [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
index 76d751af39192754c67575bdc27650ab89aa88e3..adec94377054a019e20122cb64e75d0f31c53562 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning"
-version = "0.0.118"
+version = "0.0.119"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
@@ -9,7 +9,7 @@ A Bitcoin Lightning library in Rust.
 Does most of the hard work, without implying a specific runtime, requiring clients implement basic network logic, chain interactions and disk storage.
 Still missing tons of error-handling. See GitHub issues for suggested projects if you want to contribute. Don't have to bother telling you not to use this for anything serious, because you'd have to build a client around it to even try.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 features = ["std"]
@@ -31,7 +31,7 @@ unsafe_revoked_tx_signing = []
 # Override signing to not include randomness when generating signatures for test vectors.
 _test_vectors = []
 
-no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc"]
+no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc", "libm"]
 std = ["bitcoin/std"]
 
 # Generates low-r bitcoin signatures, which saves 1 byte in 50% of the cases
@@ -48,6 +48,7 @@ regex = { version = "1.5.6", optional = true }
 backtrace = { version = "0.3", optional = true }
 
 core2 = { version = "0.3.0", optional = true, default-features = false }
+libm = { version = "0.2", optional = true, default-features = false }
 
 [dev-dependencies]
 regex = "1.5.6"
index d75b4f25b368481d8d8a08ca1966d416b0886745..b1fc9e1c0a443c36cbb5d3a772ac944a869d7268 100644 (file)
@@ -105,7 +105,7 @@ impl BlindedPath {
        ///
        /// [`ForwardTlvs`]: crate::blinded_path::payment::ForwardTlvs
        //  TODO: make all payloads the same size with padding + add dummy hops
-       pub(crate) fn new_for_payment<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
+       pub fn new_for_payment<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
                intermediate_nodes: &[payment::ForwardNode], payee_node_id: PublicKey,
                payee_tlvs: payment::ReceiveTlvs, htlc_maximum_msat: u64, entropy_source: &ES,
                secp_ctx: &Secp256k1<T>
index 4edfb7d8de05bf0e0187941cc91bf4f646fe5557..f4df1e379d931b3ac65cbcdae65441c629554c6c 100644 (file)
@@ -8,6 +8,7 @@ use crate::blinded_path::BlindedHop;
 use crate::blinded_path::utils;
 use crate::io;
 use crate::ln::PaymentSecret;
+use crate::ln::channelmanager::CounterpartyForwardingInfo;
 use crate::ln::features::BlindedHopFeatures;
 use crate::ln::msgs::DecodeError;
 use crate::offers::invoice::BlindedPayInfo;
@@ -96,6 +97,15 @@ pub struct PaymentConstraints {
        pub htlc_minimum_msat: u64,
 }
 
+impl From<CounterpartyForwardingInfo> for PaymentRelay {
+       fn from(info: CounterpartyForwardingInfo) -> Self {
+               let CounterpartyForwardingInfo {
+                       fee_base_msat, fee_proportional_millionths, cltv_expiry_delta
+               } = info;
+               Self { cltv_expiry_delta, fee_proportional_millionths, fee_base_msat }
+       }
+}
+
 impl Writeable for ForwardTlvs {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                encode_tlv_stream!(w, {
@@ -118,21 +128,6 @@ impl Writeable for ReceiveTlvs {
        }
 }
 
-// This will be removed once we support forwarding blinded HTLCs, because we'll always read a
-// `BlindedPaymentTlvs` instead.
-impl Readable for ReceiveTlvs {
-       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
-               _init_and_read_tlv_stream!(r, {
-                       (12, payment_constraints, required),
-                       (65536, payment_secret, required),
-               });
-               Ok(Self {
-                       payment_secret: payment_secret.0.unwrap(),
-                       payment_constraints: payment_constraints.0.unwrap()
-               })
-       }
-}
-
 impl<'a> Writeable for BlindedPaymentTlvsRef<'a> {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                // TODO: write padding
@@ -187,7 +182,7 @@ pub(super) fn blinded_hops<T: secp256k1::Signing + secp256k1::Verification>(
 }
 
 /// `None` if underflow occurs.
-fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
+pub(crate) fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
        let inbound_amt = inbound_amt_msat as u128;
        let base = payment_relay.fee_base_msat as u128;
        let prop = payment_relay.fee_proportional_millionths as u128;
index 2d7f0c18af395b12eff9fa87863c77e57c9567e1..1f42dc2fe4251a26be3c7b659bae89505893b6e3 100644 (file)
@@ -40,7 +40,7 @@ pub trait BroadcasterInterface {
        /// be sure to manage both cases correctly.
        ///
        /// Bitcoin transaction packages are defined in BIP 331 and here:
-       /// https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md
+       /// <https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md>
        fn broadcast_transactions(&self, txs: &[&Transaction]);
 }
 
index 800bee8e3b397433a70c2d03e6a83ae2fccc6436..14544754318ad5945622c42bd0300679aef6a6de 100644 (file)
@@ -29,13 +29,13 @@ use bitcoin::hash_types::{Txid, BlockHash};
 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::WriteableEcdsaChannelSigner;
+use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::events;
 use crate::events::{Event, EventHandler};
 use crate::util::atomic_counter::AtomicCounter;
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, WithContext};
 use crate::util::errors::APIError;
 use crate::util::wakers::{Future, Notifier};
 use crate::ln::channelmanager::ChannelDetails;
@@ -359,6 +359,7 @@ where C::Target: chain::Filter,
                process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>
        ) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
                let monitor = &monitor_state.monitor;
+               let logger = WithChannelMonitor::from(&self.logger, &monitor);
                let mut txn_outputs;
                {
                        txn_outputs = process(monitor, txdata);
@@ -375,12 +376,12 @@ where C::Target: chain::Filter,
                                }
                        }
 
-                       log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+                       log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
                        match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
                                ChannelMonitorUpdateStatus::Completed =>
-                                       log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+                                       log_trace!(logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
                                ChannelMonitorUpdateStatus::InProgress => {
-                                       log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+                                       log_debug!(logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
                                        pending_monitor_updates.push(update_id);
                                },
                                ChannelMonitorUpdateStatus::UnrecoverableError => {
@@ -401,7 +402,8 @@ where C::Target: chain::Filter,
                                                outpoint: OutPoint { txid, index: idx as u16 },
                                                script_pubkey: output.script_pubkey,
                                        };
-                                       chain_source.register_output(output)
+                                       log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint);
+                                       chain_source.register_output(output);
                                }
                        }
                }
@@ -620,7 +622,7 @@ where C::Target: chain::Filter,
                let monitors = self.monitors.read().unwrap();
                for (_, monitor_holder) in &*monitors {
                        monitor_holder.monitor.rebroadcast_pending_claims(
-                               &*self.broadcaster, &*self.fee_estimator, &*self.logger
+                               &*self.broadcaster, &*self.fee_estimator, &self.logger
                        )
                }
        }
@@ -639,7 +641,7 @@ where
                log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
                self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
                        monitor.block_connected(
-                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
                });
        }
 
@@ -648,7 +650,7 @@ where
                log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
                for monitor_state in monitor_states.values() {
                        monitor_state.monitor.block_disconnected(
-                               header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+                               header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger);
                }
        }
 }
@@ -666,7 +668,7 @@ where
                log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
                self.process_chain_data(header, None, txdata, |monitor, txdata| {
                        monitor.transactions_confirmed(
-                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+                               header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
                });
        }
 
@@ -674,7 +676,7 @@ where
                log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
                let monitor_states = self.monitors.read().unwrap();
                for monitor_state in monitor_states.values() {
-                       monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+                       monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger);
                }
        }
 
@@ -685,7 +687,8 @@ where
                        // it's still possible if a chain::Filter implementation returns a transaction.
                        debug_assert!(txdata.is_empty());
                        monitor.best_block_updated(
-                               header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
+                               header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
+                       )
                });
        }
 
@@ -711,34 +714,35 @@ where C::Target: chain::Filter,
            P::Target: Persist<ChannelSigner>,
 {
        fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
+               let logger = WithChannelMonitor::from(&self.logger, &monitor);
                let mut monitors = self.monitors.write().unwrap();
                let entry = match monitors.entry(funding_outpoint) {
                        hash_map::Entry::Occupied(_) => {
-                               log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
+                               log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
                                return Err(());
                        },
                        hash_map::Entry::Vacant(e) => e,
                };
-               log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
+               log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
                let update_id = MonitorUpdateId::from_new_monitor(&monitor);
                let mut pending_monitor_updates = Vec::new();
                let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
                match persist_res {
                        ChannelMonitorUpdateStatus::InProgress => {
-                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
+                               log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
                                pending_monitor_updates.push(update_id);
                        },
                        ChannelMonitorUpdateStatus::Completed => {
-                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
+                               log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
                        },
                        ChannelMonitorUpdateStatus::UnrecoverableError => {
                                let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-                               log_error!(self.logger, "{}", err_str);
+                               log_error!(logger, "{}", err_str);
                                panic!("{}", err_str);
                        },
                }
                if let Some(ref chain_source) = self.chain_source {
-                       monitor.load_outputs_to_watch(chain_source);
+                       monitor.load_outputs_to_watch(chain_source, &self.logger);
                }
                entry.insert(MonitorHolder {
                        monitor,
@@ -751,9 +755,10 @@ where C::Target: chain::Filter,
        fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
                // Update the monitor that watches the channel referred to by the given outpoint.
                let monitors = self.monitors.read().unwrap();
-               let ret = match monitors.get(&funding_txo) {
+               match monitors.get(&funding_txo) {
                        None => {
-                               log_error!(self.logger, "Failed to update channel monitor: no such monitor registered");
+                               let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id()));
+                               log_error!(logger, "Failed to update channel monitor: no such monitor registered");
 
                                // We should never ever trigger this from within ChannelManager. Technically a
                                // user could use this object with some proxying in between which makes this
@@ -765,7 +770,8 @@ where C::Target: chain::Filter,
                        },
                        Some(monitor_state) => {
                                let monitor = &monitor_state.monitor;
-                               log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
+                               let logger = WithChannelMonitor::from(&self.logger, &monitor);
+                               log_trace!(logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
                                let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
 
                                let update_id = MonitorUpdateId::from_monitor_update(update);
@@ -776,7 +782,7 @@ where C::Target: chain::Filter,
                                        // We don't want to persist a `monitor_update` which results in a failure to apply later
                                        // while reading `channel_monitor` with updates from storage. Instead, we should persist
                                        // the entire `channel_monitor` here.
-                                       log_warn!(self.logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
+                                       log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
                                        self.persister.update_persisted_channel(funding_txo, None, monitor, update_id)
                                } else {
                                        self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id)
@@ -784,12 +790,21 @@ where C::Target: chain::Filter,
                                match persist_res {
                                        ChannelMonitorUpdateStatus::InProgress => {
                                                pending_monitor_updates.push(update_id);
-                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
+                                               log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
                                        },
                                        ChannelMonitorUpdateStatus::Completed => {
-                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+                                               log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+                                       },
+                                       ChannelMonitorUpdateStatus::UnrecoverableError => {
+                                               // Take the monitors lock for writing so that we poison it and any future
+                                               // operations going forward fail immediately.
+                                               core::mem::drop(pending_monitor_updates);
+                                               core::mem::drop(monitors);
+                                               let _poison = self.monitors.write().unwrap();
+                                               let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+                                               log_error!(logger, "{}", err_str);
+                                               panic!("{}", err_str);
                                        },
-                                       ChannelMonitorUpdateStatus::UnrecoverableError => { /* we'll panic in a moment */ },
                                }
                                if update_res.is_err() {
                                        ChannelMonitorUpdateStatus::InProgress
@@ -797,28 +812,19 @@ where C::Target: chain::Filter,
                                        persist_res
                                }
                        }
-               };
-               if let ChannelMonitorUpdateStatus::UnrecoverableError = ret {
-                       // Take the monitors lock for writing so that we poison it and any future
-                       // operations going forward fail immediately.
-                       core::mem::drop(monitors);
-                       let _poison = self.monitors.write().unwrap();
-                       let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-                       log_error!(self.logger, "{}", err_str);
-                       panic!("{}", err_str);
                }
-               ret
        }
 
        fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
                let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
                for monitor_state in self.monitors.read().unwrap().values() {
+                       let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
                        let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
                        if !is_pending_monitor_update || monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize <= self.highest_chain_height.load(Ordering::Acquire) {
                                if is_pending_monitor_update {
-                                       log_error!(self.logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
-                                       log_error!(self.logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
-                                       log_error!(self.logger, "   This may cause duplicate payment events to be generated.");
+                                       log_error!(logger, "A ChannelMonitor sync took longer than {} blocks to complete.", LATENCY_GRACE_PERIOD_BLOCKS);
+                                       log_error!(logger, "   To avoid funds-loss, we are allowing monitor updates to be released.");
+                                       log_error!(logger, "   This may cause duplicate payment events to be generated.");
                                }
                                let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
                                if monitor_events.len() > 0 {
index 82af154315fac3d8ec0069bc7be20d5cd425834b..bcc324a5582af9d0f6c399034d7cf4e0b2327161 100644 (file)
@@ -34,7 +34,7 @@ use bitcoin::secp256k1;
 use bitcoin::sighash::EcdsaSighashType;
 
 use crate::ln::channel::INITIAL_COMMITMENT_NUMBER;
-use crate::ln::{PaymentHash, PaymentPreimage};
+use crate::ln::{PaymentHash, PaymentPreimage, ChannelId};
 use crate::ln::msgs::DecodeError;
 use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint};
 use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
@@ -43,11 +43,11 @@ use crate::chain;
 use crate::chain::{BestBlock, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, WriteableEcdsaChannelSigner, SignerProvider, EntropySource};
+use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, ecdsa::WriteableEcdsaChannelSigner, SignerProvider, EntropySource};
 use crate::chain::onchaintx::{ClaimEvent, OnchainTxHandler};
 use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
 use crate::chain::Filter;
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, Record};
 use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
 use crate::util::byte_utils;
 use crate::events::{Event, EventHandler};
@@ -71,6 +71,15 @@ use crate::sync::{Mutex, LockTestExt};
 #[must_use]
 pub struct ChannelMonitorUpdate {
        pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
+       /// Historically, [`ChannelMonitor`]s didn't know their counterparty node id. However,
+       /// `ChannelManager` really wants to know it so that it can easily look up the corresponding
+       /// channel. For now, this results in a temporary map in `ChannelManager` to look up channels
+       /// by only the funding outpoint.
+       ///
+       /// To eventually remove that, we repeat the counterparty node id here so that we can upgrade
+       /// `ChannelMonitor`s to become aware of the counterparty node id if they were generated prior
+       /// to when it was stored directly in them.
+       pub(crate) counterparty_node_id: Option<PublicKey>,
        /// The sequence number of this update. Updates *must* be replayed in-order according to this
        /// sequence number (and updates may panic if they are not). The update_id values are strictly
        /// increasing and increase by one for each new update, with two exceptions specified below.
@@ -107,7 +116,9 @@ impl Writeable for ChannelMonitorUpdate {
                for update_step in self.updates.iter() {
                        update_step.write(w)?;
                }
-               write_tlv_fields!(w, {});
+               write_tlv_fields!(w, {
+                       (1, self.counterparty_node_id, option),
+               });
                Ok(())
        }
 }
@@ -122,8 +133,11 @@ impl Readable for ChannelMonitorUpdate {
                                updates.push(upd);
                        }
                }
-               read_tlv_fields!(r, {});
-               Ok(Self { update_id, updates })
+               let mut counterparty_node_id = None;
+               read_tlv_fields!(r, {
+                       (1, counterparty_node_id, option),
+               });
+               Ok(Self { update_id, counterparty_node_id, updates })
        }
 }
 
@@ -1125,6 +1139,34 @@ macro_rules! _process_events_body {
 }
 pub(super) use _process_events_body as process_events_body;
 
+pub(crate) struct WithChannelMonitor<'a, L: Deref> where L::Target: Logger {
+       logger: &'a L,
+       peer_id: Option<PublicKey>,
+       channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               record.peer_id = self.peer_id;
+               record.channel_id = self.channel_id;
+               self.logger.log(record)
+       }
+}
+
+impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
+       pub(crate) fn from<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor: &ChannelMonitor<S>) -> Self {
+               Self::from_impl(logger, &*monitor.inner.lock().unwrap())
+       }
+
+       pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
+               let peer_id = monitor_impl.counterparty_node_id;
+               let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
+               WithChannelMonitor {
+                       logger, peer_id, channel_id,
+               }
+       }
+}
+
 impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// For lockorder enforcement purposes, we need to have a single site which constructs the
        /// `inner` mutex, otherwise cases where we lock two monitors at the same time (eg in our
@@ -1258,9 +1300,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        )
        where L::Target: Logger
        {
-               self.inner.lock().unwrap().provide_initial_counterparty_commitment_tx(txid,
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.provide_initial_counterparty_commitment_tx(txid,
                        htlc_outputs, commitment_number, their_cur_per_commitment_point, feerate_per_kw,
-                       to_broadcaster_value_sat, to_countersignatory_value_sat, logger);
+                       to_broadcaster_value_sat, to_countersignatory_value_sat, &logger);
        }
 
        /// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction.
@@ -1276,8 +1320,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                their_per_commitment_point: PublicKey,
                logger: &L,
        ) where L::Target: Logger {
-               self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx(
-                       txid, htlc_outputs, commitment_number, their_per_commitment_point, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.provide_latest_counterparty_commitment_tx(
+                       txid, htlc_outputs, commitment_number, their_per_commitment_point, &logger)
        }
 
        #[cfg(test)]
@@ -1302,8 +1348,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().provide_payment_preimage(
-                       payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.provide_payment_preimage(
+                       payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
        }
 
        /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
@@ -1322,7 +1370,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().update_monitor(updates, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.update_monitor(updates, broadcaster, fee_estimator, &logger)
        }
 
        /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
@@ -1346,15 +1396,22 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// Loads the funding txo and outputs to watch into the given `chain::Filter` by repeatedly
        /// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs
        /// have been registered.
-       pub fn load_outputs_to_watch<F: Deref>(&self, filter: &F) where F::Target: chain::Filter {
+       pub fn load_outputs_to_watch<F: Deref, L: Deref>(&self, filter: &F, logger: &L)
+       where
+               F::Target: chain::Filter, L::Target: Logger,
+       {
                let lock = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*lock);
+               log_trace!(&logger, "Registering funding outpoint {}", &lock.get_funding_txo().0);
                filter.register_tx(&lock.get_funding_txo().0.txid, &lock.get_funding_txo().1);
                for (txid, outputs) in lock.get_outputs_to_watch().iter() {
                        for (index, script_pubkey) in outputs.iter() {
                                assert!(*index <= u16::max_value() as u32);
+                               let outpoint = OutPoint { txid: *txid, index: *index as u16 };
+                               log_trace!(logger, "Registering outpoint {} with the filter for monitoring spends", outpoint);
                                filter.register_output(WatchedOutput {
                                        block_hash: None,
-                                       outpoint: OutPoint { txid: *txid, index: *index as u16 },
+                                       outpoint,
                                        script_pubkey: script_pubkey.clone(),
                                });
                        }
@@ -1459,7 +1516,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// to the commitment transaction being revoked, this will return a signed transaction, but
        /// the signature will not be valid.
        ///
-       /// [`EcdsaChannelSigner::sign_justice_revoked_output`]: crate::sign::EcdsaChannelSigner::sign_justice_revoked_output
+       /// [`EcdsaChannelSigner::sign_justice_revoked_output`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_justice_revoked_output
        /// [`Persist`]: crate::chain::chainmonitor::Persist
        pub fn sign_to_local_justice_tx(&self, justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64) -> Result<Transaction, ()> {
                self.inner.lock().unwrap().sign_to_local_justice_tx(justice_tx, input_idx, value, commitment_number)
@@ -1501,7 +1558,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
        where L::Target: Logger {
-               self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.get_latest_holder_commitment_txn(&logger)
        }
 
        /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
@@ -1510,7 +1569,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        #[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
        pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
        where L::Target: Logger {
-               self.inner.lock().unwrap().unsafe_get_latest_holder_commitment_txn(logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.unsafe_get_latest_holder_commitment_txn(&logger)
        }
 
        /// Processes transactions in a newly connected block, which may result in any of the following:
@@ -1531,15 +1592,17 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().block_connected(
-                       header, txdata, height, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.block_connected(
+                       header, txdata, height, broadcaster, fee_estimator, &logger)
        }
 
        /// Determines if the disconnected block contained any transactions of interest and updates
@@ -1550,14 +1613,16 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().block_disconnected(
-                       header, height, broadcaster, fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.block_disconnected(
+                       header, height, broadcaster, fee_estimator, &logger)
        }
 
        /// Processes transactions confirmed in a block with the given header and height, returning new
@@ -1574,7 +1639,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -1582,8 +1647,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                L::Target: Logger,
        {
                let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-               self.inner.lock().unwrap().transactions_confirmed(
-                       header, txdata, height, broadcaster, &bounded_fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.transactions_confirmed(
+                       header, txdata, height, broadcaster, &bounded_fee_estimator, &logger)
        }
 
        /// Processes a transaction that was reorganized out of the chain.
@@ -1597,15 +1664,18 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                txid: &Txid,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
        {
                let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-               self.inner.lock().unwrap().transaction_unconfirmed(
-                       txid, broadcaster, &bounded_fee_estimator, logger);
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.transaction_unconfirmed(
+                       txid, broadcaster, &bounded_fee_estimator, &logger
+               );
        }
 
        /// Updates the monitor with the current best chain tip, returning new outputs to watch. See
@@ -1621,7 +1691,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: F,
-               logger: L,
+               logger: &L,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -1629,8 +1699,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                L::Target: Logger,
        {
                let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
-               self.inner.lock().unwrap().best_block_updated(
-                       header, height, broadcaster, &bounded_fee_estimator, logger)
+               let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
+               inner.best_block_updated(
+                       header, height, broadcaster, &bounded_fee_estimator, &logger
+               )
        }
 
        /// Returns the set of txids that should be monitored for re-organization out of the chain.
@@ -1658,7 +1731,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// invoking this every 30 seconds, or lower if running in an environment with spotty
        /// connections, like on mobile.
        pub fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
-               &self, broadcaster: B, fee_estimator: F, logger: L,
+               &self, broadcaster: B, fee_estimator: F, logger: &L,
        )
        where
                B::Target: BroadcasterInterface,
@@ -1667,6 +1740,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        {
                let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
                let mut inner = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*inner);
                let current_height = inner.best_block.height;
                inner.onchain_tx_handler.rebroadcast_pending_claims(
                        current_height, &broadcaster, &fee_estimator, &logger,
@@ -2231,7 +2305,7 @@ macro_rules! fail_unbroadcast_htlcs {
                                                        // broadcastable commitment transaction has the HTLC in it, but it
                                                        // cannot currently change after channel initialization, so we don't
                                                        // need to here.
-                                                       let confirmed_htlcs_iter: &mut Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
+                                                       let confirmed_htlcs_iter: &mut dyn Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
 
                                                        let mut matched_htlc = false;
                                                        for (ref broadcast_htlc, ref broadcast_source) in confirmed_htlcs_iter {
@@ -2375,13 +2449,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                Ok(())
        }
 
-       pub(crate) fn provide_initial_counterparty_commitment_tx<L: Deref>(
+       fn provide_initial_counterparty_commitment_tx<L: Deref>(
                &mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
                commitment_number: u64, their_per_commitment_point: PublicKey, feerate_per_kw: u32,
-               to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &L
-       )
-       where L::Target: Logger
-       {
+               to_broadcaster_value: u64, to_countersignatory_value: u64, logger: &WithChannelMonitor<L>,
+       ) where L::Target: Logger {
                self.initial_counterparty_commitment_info = Some((their_per_commitment_point.clone(),
                        feerate_per_kw, to_broadcaster_value, to_countersignatory_value));
 
@@ -2394,7 +2466,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                their_per_commitment_point, logger);
        }
 
-       pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_per_commitment_point: PublicKey, logger: &L) where L::Target: Logger {
+       fn provide_latest_counterparty_commitment_tx<L: Deref>(
+               &mut self, txid: Txid,
+               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
+               commitment_number: u64, their_per_commitment_point: PublicKey, logger: &WithChannelMonitor<L>,
+       ) where L::Target: Logger {
                // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
                // so that a remote monitor doesn't learn anything unless there is a malicious close.
                // (only maybe, sadly we cant do the same for local info, as we need to be aware of
@@ -2527,7 +2603,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
        /// commitment_tx_infos which contain the payment hash have been revoked.
        fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
                &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
-               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>)
        where B::Target: BroadcasterInterface,
                    F::Target: FeeEstimator,
                    L::Target: Logger,
@@ -2604,21 +2680,64 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }
        }
 
-       pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
-               where B::Target: BroadcasterInterface,
-                                       L::Target: Logger,
-       {
-               let commit_txs = self.get_latest_holder_commitment_txn(logger);
-               let mut txs = vec![];
-               for tx in commit_txs.iter() {
-                       log_info!(logger, "Broadcasting local {}", log_tx!(tx));
-                       txs.push(tx);
-               }
-               broadcaster.broadcast_transactions(&txs);
+       fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+               let funding_outp = HolderFundingOutput::build(
+                       self.funding_redeemscript.clone(),
+                       self.channel_value_satoshis,
+                       self.onchain_tx_handler.channel_type_features().clone()
+               );
+               let commitment_package = PackageTemplate::build_package(
+                       self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
+                       PackageSolvingData::HolderFundingOutput(funding_outp),
+                       self.best_block.height(), self.best_block.height()
+               );
+               let mut claimable_outpoints = vec![commitment_package];
                self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+               // Although we aren't signing the transaction directly here, the transaction will be signed
+               // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
+               // new channel updates.
+               self.holder_tx_signed = true;
+               let mut watch_outputs = Vec::new();
+               // We can't broadcast our HTLC transactions while the commitment transaction is
+               // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
+               // `transactions_confirmed`.
+               if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
+                       // Because we're broadcasting a commitment transaction, we should construct the package
+                       // assuming it gets confirmed in the next block. Sadly, we have code which considers
+                       // "not yet confirmed" things as discardable, so we cannot do that here.
+                       let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
+                               &self.current_holder_commitment_tx, self.best_block.height()
+                       );
+                       let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
+                       let new_outputs = self.get_broadcasted_holder_watch_outputs(
+                               &self.current_holder_commitment_tx, &unsigned_commitment_tx
+                       );
+                       if !new_outputs.is_empty() {
+                               watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
+                       }
+                       claimable_outpoints.append(&mut new_outpoints);
+               }
+               (claimable_outpoints, watch_outputs)
+       }
+
+       pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
+               &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>
+       )
+       where
+               B::Target: BroadcasterInterface,
+               F::Target: FeeEstimator,
+               L::Target: Logger,
+       {
+               let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+               self.onchain_tx_handler.update_claims_view_from_requests(
+                       claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
+                       fee_estimator, logger
+               );
        }
 
-       pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
+       fn update_monitor<B: Deref, F: Deref, L: Deref>(
+               &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor<L>
+       ) -> Result<(), ()>
        where B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
@@ -2633,6 +2752,15 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).",
                                log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
                }
+
+               if updates.counterparty_node_id.is_some() {
+                       if self.counterparty_node_id.is_none() {
+                               self.counterparty_node_id = updates.counterparty_node_id;
+                       } else {
+                               debug_assert_eq!(self.counterparty_node_id, updates.counterparty_node_id);
+                       }
+               }
+
                // ChannelMonitor updates may be applied after force close if we receive a preimage for a
                // broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
                // is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
@@ -2703,26 +2831,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                        log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
                                                        continue;
                                                }
-                                               self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
-                                               // If the channel supports anchor outputs, we'll need to emit an external
-                                               // event to be consumed such that a child transaction is broadcast with a
-                                               // high enough feerate for the parent commitment transaction to confirm.
-                                               if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
-                                                       let funding_output = HolderFundingOutput::build(
-                                                               self.funding_redeemscript.clone(), self.channel_value_satoshis,
-                                                               self.onchain_tx_handler.channel_type_features().clone(),
-                                                       );
-                                                       let best_block_height = self.best_block.height();
-                                                       let commitment_package = PackageTemplate::build_package(
-                                                               self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
-                                                               PackageSolvingData::HolderFundingOutput(funding_output),
-                                                               best_block_height, best_block_height
-                                                       );
-                                                       self.onchain_tx_handler.update_claims_view_from_requests(
-                                                               vec![commitment_package], best_block_height, best_block_height,
-                                                               broadcaster, &bounded_fee_estimator, logger,
-                                                       );
-                                               }
+                                               self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
                                        } else if !self.holder_tx_signed {
                                                log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
                                                log_error!(logger, "    in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
@@ -2763,15 +2872,15 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                } else { ret }
        }
 
-       pub fn get_latest_update_id(&self) -> u64 {
+       fn get_latest_update_id(&self) -> u64 {
                self.latest_update_id
        }
 
-       pub fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
+       fn get_funding_txo(&self) -> &(OutPoint, ScriptBuf) {
                &self.funding_info
        }
 
-       pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
+       fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
                // If we've detected a counterparty commitment tx on chain, we must include it in the set
                // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
                // its trivial to do, double-check that here.
@@ -2781,7 +2890,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                &self.outputs_to_watch
        }
 
-       pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
+       fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
                let mut ret = Vec::new();
                mem::swap(&mut ret, &mut self.pending_monitor_events);
                ret
@@ -2856,7 +2965,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                ret
        }
 
-       pub(crate) fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
+       fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
                let (their_per_commitment_point, feerate_per_kw, to_broadcaster_value,
                        to_countersignatory_value) = self.initial_counterparty_commitment_info?;
                let htlc_outputs = vec![];
@@ -2890,7 +2999,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        channel_parameters)
        }
 
-       pub(crate) fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
+       fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
                update.updates.iter().filter_map(|update| {
                        match update {
                                &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid,
@@ -2916,7 +3025,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }).collect()
        }
 
-       pub(crate) fn sign_to_local_justice_tx(
+       fn sign_to_local_justice_tx(
                &self, mut justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64
        ) -> Result<Transaction, ()> {
                let secret = self.get_secret(commitment_number).ok_or(())?;
@@ -2944,15 +3053,15 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                self.commitment_secrets.get_secret(idx)
        }
 
-       pub(crate) fn get_min_seen_secret(&self) -> u64 {
+       fn get_min_seen_secret(&self) -> u64 {
                self.commitment_secrets.get_min_seen_secret()
        }
 
-       pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 {
+       fn get_cur_counterparty_commitment_number(&self) -> u64 {
                self.current_counterparty_commitment_number
        }
 
-       pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 {
+       fn get_cur_holder_commitment_number(&self) -> u64 {
                self.current_holder_commitment_number
        }
 
@@ -3299,7 +3408,61 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }
        }
 
-       pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+       /// Cancels any existing pending claims for a commitment that previously confirmed and has now
+       /// been replaced by another.
+       pub fn cancel_prev_commitment_claims<L: Deref>(
+               &mut self, logger: &L, confirmed_commitment_txid: &Txid
+       ) where L::Target: Logger {
+               for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain {
+                       // Cancel any pending claims for counterparty commitments we've seen confirm.
+                       if counterparty_commitment_txid == confirmed_commitment_txid {
+                               continue;
+                       }
+                       for (htlc, _) in self.counterparty_claimable_outpoints.get(counterparty_commitment_txid).unwrap_or(&vec![]) {
+                               log_trace!(logger, "Canceling claims for previously confirmed counterparty commitment {}",
+                                       counterparty_commitment_txid);
+                               let mut outpoint = BitcoinOutPoint { txid: *counterparty_commitment_txid, vout: 0 };
+                               if let Some(vout) = htlc.transaction_output_index {
+                                       outpoint.vout = vout;
+                                       self.onchain_tx_handler.abandon_claim(&outpoint);
+                               }
+                       }
+               }
+               if self.holder_tx_signed {
+                       // If we've signed, we may have broadcast either commitment (prev or current), and
+                       // attempted to claim from it immediately without waiting for a confirmation.
+                       if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid {
+                               log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+                                       self.current_holder_commitment_tx.txid);
+                               let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 };
+                               for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs {
+                                       if let Some(vout) = htlc.transaction_output_index {
+                                               outpoint.vout = vout;
+                                               self.onchain_tx_handler.abandon_claim(&outpoint);
+                                       }
+                               }
+                       }
+                       if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx {
+                               if prev_holder_commitment_tx.txid != *confirmed_commitment_txid {
+                                       log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+                                               prev_holder_commitment_tx.txid);
+                                       let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 };
+                                       for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs {
+                                               if let Some(vout) = htlc.transaction_output_index {
+                                                       outpoint.vout = vout;
+                                                       self.onchain_tx_handler.abandon_claim(&outpoint);
+                                               }
+                                       }
+                               }
+                       }
+               } else {
+                       // No previous claim.
+               }
+       }
+
+       fn get_latest_holder_commitment_txn<L: Deref>(
+               &mut self, logger: &WithChannelMonitor<L>,
+       ) -> Vec<Transaction> where L::Target: Logger {
                log_debug!(logger, "Getting signed latest holder commitment transaction!");
                self.holder_tx_signed = true;
                let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
@@ -3338,7 +3501,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
        #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
        /// Note that this includes possibly-locktimed-in-the-future transactions!
-       fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+       fn unsafe_get_latest_holder_commitment_txn<L: Deref>(
+               &mut self, logger: &WithChannelMonitor<L>
+       ) -> Vec<Transaction> where L::Target: Logger {
                log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
                let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
                let txid = commitment_tx.txid();
@@ -3365,10 +3530,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                holder_transactions
        }
 
-       pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L) -> Vec<TransactionOutputs>
+       fn block_connected<B: Deref, F: Deref, L: Deref>(
+               &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B,
+               fee_estimator: F, logger: &WithChannelMonitor<L>,
+       ) -> Vec<TransactionOutputs>
                where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                                       L::Target: Logger,
+                       F::Target: FeeEstimator,
+                       L::Target: Logger,
        {
                let block_hash = header.block_hash();
                self.best_block = BestBlock::new(block_hash, height);
@@ -3383,7 +3551,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &WithChannelMonitor<L>,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -3394,9 +3562,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
                if height > self.best_block.height() {
                        self.best_block = BestBlock::new(block_hash, height);
-                       self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
+                       log_trace!(logger, "Connecting new block {} at height {}", block_hash, height);
+                       self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
                } else if block_hash != self.best_block.block_hash() {
                        self.best_block = BestBlock::new(block_hash, height);
+                       log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height);
                        self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
                        self.onchain_tx_handler.block_disconnected(height + 1, broadcaster, fee_estimator, logger);
                        Vec::new()
@@ -3410,7 +3580,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                height: u32,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &WithChannelMonitor<L>,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -3433,6 +3603,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                let mut claimable_outpoints = Vec::new();
                'tx_iter: for tx in &txn_matched {
                        let txid = tx.txid();
+                       log_trace!(logger, "Transaction {} confirmed in block {}", txid , block_hash);
                        // If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
                        if Some(txid) == self.funding_spend_confirmed {
                                log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
@@ -3504,6 +3675,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                        commitment_tx_to_counterparty_output,
                                                },
                                        });
+                                       // Now that we've detected a confirmed commitment transaction, attempt to cancel
+                                       // pending claims for any commitments that were previously confirmed such that
+                                       // we don't continue claiming inputs that no longer exist.
+                                       self.cancel_prev_commitment_claims(&logger, &txid);
                                }
                        }
                        if tx.input.len() >= 1 {
@@ -3527,9 +3702,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                break;
                                        }
                                }
-                               self.is_resolving_htlc_output(&tx, height, &block_hash, &logger);
+                               self.is_resolving_htlc_output(&tx, height, &block_hash, logger);
 
-                               self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, &logger);
+                               self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger);
                        }
                }
 
@@ -3537,7 +3712,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        self.best_block = BestBlock::new(block_hash, height);
                }
 
-               self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
+               self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, logger)
        }
 
        /// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update
@@ -3557,7 +3732,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                mut claimable_outpoints: Vec<PackageTemplate>,
                broadcaster: &B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: &L,
+               logger: &WithChannelMonitor<L>,
        ) -> Vec<TransactionOutputs>
        where
                B::Target: BroadcasterInterface,
@@ -3569,29 +3744,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
                let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
                if should_broadcast {
-                       let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
-                       let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
-                       claimable_outpoints.push(commitment_package);
-                       self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
-                       // Although we aren't signing the transaction directly here, the transaction will be signed
-                       // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
-                       // new channel updates.
-                       self.holder_tx_signed = true;
-                       // We can't broadcast our HTLC transactions while the commitment transaction is
-                       // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
-                       // `transactions_confirmed`.
-                       if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
-                               // Because we're broadcasting a commitment transaction, we should construct the package
-                               // assuming it gets confirmed in the next block. Sadly, we have code which considers
-                               // "not yet confirmed" things as discardable, so we cannot do that here.
-                               let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
-                               let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
-                               let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
-                               if !new_outputs.is_empty() {
-                                       watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
-                               }
-                               claimable_outpoints.append(&mut new_outpoints);
-                       }
+                       let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+                       claimable_outpoints.append(&mut new_outpoints);
+                       watch_outputs.append(&mut new_outputs);
                }
 
                // Find which on-chain events have reached their confirmation threshold.
@@ -3702,10 +3857,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                watch_outputs
        }
 
-       pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: L)
-               where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                     L::Target: Logger,
+       fn block_disconnected<B: Deref, F: Deref, L: Deref>(
+               &mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor<L>
+       ) where B::Target: BroadcasterInterface,
+               F::Target: FeeEstimator,
+               L::Target: Logger,
        {
                log_trace!(logger, "Block {} at height {} disconnected", header.block_hash(), height);
 
@@ -3725,7 +3881,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                txid: &Txid,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &WithChannelMonitor<L>,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
@@ -3804,7 +3960,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                false
        }
 
-       fn should_broadcast_holder_commitment_txn<L: Deref>(&self, logger: &L) -> bool where L::Target: Logger {
+       fn should_broadcast_holder_commitment_txn<L: Deref>(
+               &self, logger: &WithChannelMonitor<L>
+       ) -> bool where L::Target: Logger {
                // There's no need to broadcast our commitment transaction if we've seen one confirmed (even
                // with 1 confirmation) as it'll be rejected as duplicate/conflicting.
                if self.funding_spend_confirmed.is_some() ||
@@ -3880,7 +4038,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
        /// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder
        /// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
-       fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) where L::Target: Logger {
+       fn is_resolving_htlc_output<L: Deref>(
+               &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
+       ) where L::Target: Logger {
                'outer_loop: for input in &tx.input {
                        let mut payment_data = None;
                        let htlc_claim = HTLCClaim::from_witness(&input.witness);
@@ -4082,6 +4242,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
                                        outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                        output: outp.clone(),
+                                       channel_keys_id: Some(self.channel_keys_id),
                                });
                        }
                        if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
@@ -4110,6 +4271,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
                                        outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                        output: outp.clone(),
+                                       channel_keys_id: Some(self.channel_keys_id),
                                });
                        }
                }
@@ -4119,7 +4281,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
        /// Checks if the confirmed transaction is paying funds back to some address we can assume to
        /// own.
        fn check_tx_and_push_spendable_outputs<L: Deref>(
-               &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L,
+               &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor<L>,
        ) where L::Target: Logger {
                for spendable_output in self.get_spendable_outputs(tx) {
                        let entry = OnchainEventEntry {
@@ -4142,11 +4304,11 @@ where
        L::Target: Logger,
 {
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
-               self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &*self.3);
+               self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &self.3);
        }
 
        fn block_disconnected(&self, header: &Header, height: u32) {
-               self.0.block_disconnected(header, height, &*self.1, &*self.2, &*self.3);
+               self.0.block_disconnected(header, height, &*self.1, &*self.2, &self.3);
        }
 }
 
@@ -4158,15 +4320,15 @@ where
        L::Target: Logger,
 {
        fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
-               self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &*self.3);
+               self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &self.3);
        }
 
        fn transaction_unconfirmed(&self, txid: &Txid) {
-               self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &*self.3);
+               self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &self.3);
        }
 
        fn best_block_updated(&self, header: &Header, height: u32) {
-               self.0.best_block_updated(header, height, &*self.1, &*self.2, &*self.3);
+               self.0.best_block_updated(header, height, &*self.1, &*self.2, &self.3);
        }
 
        fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
@@ -4177,7 +4339,7 @@ where
 const MAX_ALLOC_SIZE: usize = 64*1024;
 
 impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
-               for (BlockHash, ChannelMonitor<SP::Signer>) {
+               for (BlockHash, ChannelMonitor<SP::EcdsaSigner>) {
        fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP)) -> Result<Self, DecodeError> {
                macro_rules! unwrap_obj {
                        ($key: expr) => {
@@ -4363,7 +4525,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
                                return Err(DecodeError::InvalidValue);
                        }
                }
-               let onchain_tx_handler: OnchainTxHandler<SP::Signer> = ReadableArgs::read(
+               let onchain_tx_handler: OnchainTxHandler<SP::EcdsaSigner> = ReadableArgs::read(
                        reader, (entropy_source, signer_provider, channel_value_satoshis, channel_keys_id)
                )?;
 
@@ -4499,7 +4661,7 @@ mod tests {
        use super::ChannelMonitorUpdateStep;
        use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err};
        use crate::chain::{BestBlock, Confirm};
-       use crate::chain::channelmonitor::ChannelMonitor;
+       use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor};
        use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
        use crate::chain::transaction::OutPoint;
        use crate::sign::InMemorySigner;
@@ -4512,6 +4674,7 @@ mod tests {
        use crate::util::errors::APIError;
        use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
        use crate::util::ser::{ReadableArgs, Writeable};
+       use crate::util::logger::Logger;
        use crate::sync::{Arc, Mutex};
        use crate::io;
        use crate::ln::features::ChannelTypeFeatures;
@@ -4701,6 +4864,7 @@ mod tests {
 
                let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
                let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);
+
                monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(),
                        htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap();
                monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"1").to_byte_array()),
@@ -4898,5 +5062,62 @@ mod tests {
                }
        }
 
+       #[test]
+       fn test_with_channel_monitor_impl_logger() {
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(TestLogger::new());
+
+               let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+
+               let keys = InMemorySigner::new(
+                       &secp_ctx,
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       [41; 32],
+                       0,
+                       [0; 32],
+                       [0; 32],
+               );
+
+               let counterparty_pubkeys = ChannelPublicKeys {
+                       funding_pubkey: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
+                       revocation_basepoint: RevocationBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap())),
+                       payment_point: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[46; 32]).unwrap()),
+                       delayed_payment_basepoint: DelayedPaymentBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[47; 32]).unwrap())),
+                       htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
+               };
+               let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+               let channel_parameters = ChannelTransactionParameters {
+                       holder_pubkeys: keys.holder_channel_pubkeys.clone(),
+                       holder_selected_contest_delay: 66,
+                       is_outbound_from_holder: true,
+                       counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+                               pubkeys: counterparty_pubkeys,
+                               selected_contest_delay: 67,
+                       }),
+                       funding_outpoint: Some(funding_outpoint),
+                       channel_type_features: ChannelTypeFeatures::only_static_remote_key()
+               };
+               let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let best_block = BestBlock::from_network(Network::Testnet);
+               let monitor = ChannelMonitor::new(Secp256k1::new(), keys,
+                       Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
+                       (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
+                       &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
+                       best_block, dummy_key);
+
+               let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone();
+               let context_logger = WithChannelMonitor::from(&logger, &monitor);
+               log_error!(context_logger, "This is an error");
+               log_warn!(context_logger, "This is an error");
+               log_debug!(context_logger, "This is an error");
+               log_trace!(context_logger, "This is an error");
+               log_gossip!(context_logger, "This is an error");
+               log_info!(context_logger, "This is an error");
+               logger.assert_log_context_contains("lightning::chain::channelmonitor::tests", Some(dummy_key), Some(chan_id), 6);
+       }
        // Further testing is done in the ChannelManager integration tests.
 }
index a7c3a6d88badf6d41f9e4873b26eb4e2e4b2db86..dafce03ddb0d76ca1ca7f828f71b59c557345bab 100644 (file)
@@ -17,7 +17,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::PublicKey;
 
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
-use crate::sign::WriteableEcdsaChannelSigner;
+use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::chain::transaction::{OutPoint, TransactionData};
 
 use crate::prelude::*;
@@ -357,7 +357,7 @@ pub struct WatchedOutput {
        pub script_pubkey: ScriptBuf,
 }
 
-impl<T: Listen> Listen for core::ops::Deref<Target = T> {
+impl<T: Listen> Listen for dyn core::ops::Deref<Target = T> {
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
                (**self).filtered_block_connected(header, txdata, height);
        }
index c28c572e6b5145980611a38bb4121188d4fcbbe9..59c98f05ebc4018f5915165d05e7be8facc697b9 100644 (file)
@@ -23,7 +23,7 @@ use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
 use bitcoin::secp256k1;
 
 use crate::chain::chaininterface::compute_feerate_sat_per_1000_weight;
-use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ChannelSigner, EntropySource, SignerProvider, WriteableEcdsaChannelSigner};
+use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ChannelSigner, EntropySource, SignerProvider, ecdsa::WriteableEcdsaChannelSigner};
 use crate::ln::msgs::DecodeError;
 use crate::ln::PaymentPreimage;
 use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction};
@@ -339,7 +339,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        }
 }
 
-impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP, u64, [u8; 32])> for OnchainTxHandler<SP::Signer> {
+impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP, u64, [u8; 32])> for OnchainTxHandler<SP::EcdsaSigner> {
        fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u64, [u8; 32])) -> Result<Self, DecodeError> {
                let entropy_source = args.0;
                let signer_provider = args.1;
@@ -473,14 +473,13 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
        /// invoking this every 30 seconds, or lower if running in an environment with spotty
        /// connections, like on mobile.
-       pub(crate) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Logger>(
                &mut self, current_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>,
                logger: &L,
        )
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len());
                for (claim_id, request) in self.pending_claim_requests.iter() {
@@ -528,13 +527,11 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        ///
        /// Panics if there are signing errors, because signing operations in reaction to on-chain
        /// events are not expected to fail, and if they do, we may lose funds.
-       fn generate_claim<F: Deref, L: Deref>(
+       fn generate_claim<F: Deref, L: Logger>(
                &mut self, cur_height: u32, cached_request: &PackageTemplate, force_feerate_bump: bool,
                fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
        ) -> Option<(u32, u64, OnchainClaim)>
-       where
-               F::Target: FeeEstimator,
-               L::Target: Logger,
+       where F::Target: FeeEstimator,
        {
                let request_outpoints = cached_request.outpoints();
                if request_outpoints.is_empty() {
@@ -679,6 +676,25 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                None
        }
 
+       pub fn abandon_claim(&mut self, outpoint: &BitcoinOutPoint) {
+               let claim_id = self.claimable_outpoints.get(outpoint).map(|(claim_id, _)| *claim_id)
+                       .or_else(|| {
+                               self.pending_claim_requests.iter()
+                                       .find(|(_, claim)| claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint))
+                                       .map(|(claim_id, _)| *claim_id)
+                       });
+               if let Some(claim_id) = claim_id {
+                       if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
+                               for outpoint in claim.outpoints() {
+                                       self.claimable_outpoints.remove(&outpoint);
+                               }
+                       }
+               } else {
+                       self.locktimed_packages.values_mut().for_each(|claims|
+                               claims.retain(|claim| !claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint)));
+               }
+       }
+
        /// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
        /// for this channel, provide new relevant on-chain transactions and/or new claim requests.
        /// Together with `update_claims_view_from_matched_txn` this used to be named
@@ -688,13 +704,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        /// `conf_height` represents the height at which the request was generated. This
        /// does not need to equal the current blockchain tip height, which should be provided via
        /// `cur_height`, however it must never be higher than `cur_height`.
-       pub(crate) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
                &mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
                broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
                let mut preprocessed_requests = Vec::with_capacity(requests.len());
@@ -809,13 +824,12 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
        /// `conf_height` represents the height at which the transactions in `txn_matched` were
        /// confirmed. This does not need to equal the current blockchain tip height, which should be
        /// provided via `cur_height`, however it must never be higher than `cur_height`.
-       pub(crate) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Logger>(
                &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash,
                cur_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
                let mut bump_candidates = HashMap::new();
@@ -977,16 +991,15 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                }
        }
 
-       pub(crate) fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
+       pub(super) fn transaction_unconfirmed<B: Deref, F: Deref, L: Logger>(
                &mut self,
                txid: &Txid,
                broadcaster: B,
                fee_estimator: &LowerBoundedFeeEstimator<F>,
-               logger: L,
+               logger: &L,
        ) where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
-               L::Target: Logger,
        {
                let mut height = None;
                for entry in self.onchain_events_awaiting_threshold_conf.iter() {
@@ -1001,10 +1014,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                }
        }
 
-       pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L)
+       pub(super) fn block_disconnected<B: Deref, F: Deref, L: Logger>(&mut self, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
                where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                                       L::Target: Logger,
+                       F::Target: FeeEstimator,
        {
                let mut bump_candidates = HashMap::new();
                let onchain_events_awaiting_threshold_conf =
@@ -1034,7 +1046,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                        // `height` is the height being disconnected, so our `current_height` is 1 lower.
                        let current_height = height - 1;
                        if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
-                               current_height, &request, true /* force_feerate_bump */, fee_estimator, &&*logger
+                               current_height, &request, true /* force_feerate_bump */, fee_estimator, logger
                        ) {
                                request.set_timer(new_timer);
                                request.set_feerate(new_feerate);
index 0759e80eb630e2e035d1a45cd9bf35b1140c0ef0..efc32bf7d40d9adcdfaff19ef23e3dc66b363bd7 100644 (file)
@@ -28,7 +28,7 @@ use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
 use crate::ln::msgs::DecodeError;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT, compute_feerate_sat_per_1000_weight, FEERATE_FLOOR_SATS_PER_KW};
-use crate::sign::WriteableEcdsaChannelSigner;
+use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::chain::onchaintx::{ExternalHTLCClaim, OnchainTxHandler};
 use crate::util::logger::Logger;
 use crate::util::ser::{Readable, Writer, Writeable, RequiredWrapper};
@@ -908,10 +908,10 @@ impl PackageTemplate {
                }
                htlcs
        }
-       pub(crate) fn finalize_malleable_package<L: Deref, Signer: WriteableEcdsaChannelSigner>(
+       pub(crate) fn finalize_malleable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
                &self, current_height: u32, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64,
                destination_script: ScriptBuf, logger: &L
-       ) -> Option<Transaction> where L::Target: Logger {
+       ) -> Option<Transaction> {
                debug_assert!(self.is_malleable());
                let mut bumped_tx = Transaction {
                        version: 2,
@@ -932,9 +932,9 @@ impl PackageTemplate {
                log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
                Some(bumped_tx)
        }
-       pub(crate) fn finalize_untractable_package<L: Deref, Signer: WriteableEcdsaChannelSigner>(
+       pub(crate) fn finalize_untractable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
                &self, onchain_handler: &mut OnchainTxHandler<Signer>, logger: &L,
-       ) -> Option<Transaction> where L::Target: Logger {
+       ) -> Option<Transaction> {
                debug_assert!(!self.is_malleable());
                if let Some((outpoint, outp)) = self.inputs.first() {
                        if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
@@ -962,13 +962,11 @@ impl PackageTemplate {
        /// Returns value in satoshis to be included as package outgoing output amount and feerate
        /// which was used to generate the value. Will not return less than `dust_limit_sats` for the
        /// value.
-       pub(crate) fn compute_package_output<F: Deref, L: Deref>(
+       pub(crate) fn compute_package_output<F: Deref, L: Logger>(
                &self, predicted_weight: u64, dust_limit_sats: u64, force_feerate_bump: bool,
                fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
        ) -> Option<(u64, u64)>
-       where
-               F::Target: FeeEstimator,
-               L::Target: Logger,
+       where F::Target: FeeEstimator,
        {
                debug_assert!(self.malleability == PackageMalleability::Malleable, "The package output is fixed for non-malleable packages");
                let input_amounts = self.package_amount();
@@ -1111,9 +1109,8 @@ impl Readable for PackageTemplate {
 ///
 /// [`OnChainSweep`]: crate::chain::chaininterface::ConfirmationTarget::OnChainSweep
 /// [`FEERATE_FLOOR_SATS_PER_KW`]: crate::chain::chaininterface::MIN_RELAY_FEE_SAT_PER_1000_WEIGHT
-fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predicted_weight: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
+fn compute_fee_from_spent_amounts<F: Deref, L: Logger>(input_amounts: u64, predicted_weight: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
        where F::Target: FeeEstimator,
-             L::Target: Logger,
 {
        let sweep_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep);
        let fee_rate = cmp::min(sweep_feerate, compute_feerate_sat_per_1000_weight(input_amounts / 2, predicted_weight));
@@ -1135,13 +1132,12 @@ fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predic
 /// feerate, or just use the previous feerate otherwise. If a feerate bump did happen, we also
 /// verify that those bumping heuristics respect BIP125 rules 3) and 4) and if required adjust the
 /// new fee to meet the RBF policy requirement.
-fn feerate_bump<F: Deref, L: Deref>(
+fn feerate_bump<F: Deref, L: Logger>(
        predicted_weight: u64, input_amounts: u64, previous_feerate: u64, force_feerate_bump: bool,
        fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
 ) -> Option<(u64, u64)>
 where
        F::Target: FeeEstimator,
-       L::Target: Logger,
 {
        // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
        let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
index 7c6cc6c6a5688181b6465fbb7f775c255aa0261f..1b019ba349e0ae8687a190b92ebd144fe164f6f9 100644 (file)
@@ -25,9 +25,9 @@ use crate::ln::chan_utils::{
 };
 use crate::prelude::*;
 use crate::sign::{
-       ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner, SignerProvider,
-       WriteableEcdsaChannelSigner, P2WPKH_WITNESS_WEIGHT
+       ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT
 };
+use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
 use crate::sync::Mutex;
 use crate::util::logger::Logger;
 
@@ -35,6 +35,7 @@ use bitcoin::{OutPoint, PubkeyHash, Sequence, ScriptBuf, Transaction, TxIn, TxOu
 use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
 use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::consensus::Encodable;
+use bitcoin::psbt::PartiallySignedTransaction;
 use bitcoin::secp256k1;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdsa::Signature;
@@ -92,7 +93,7 @@ impl AnchorDescriptor {
        /// Derives the channel signer required to sign the anchor input.
        pub fn derive_channel_signer<S: WriteableEcdsaChannelSigner, SP: Deref>(&self, signer_provider: &SP) -> S
        where
-               SP::Target: SignerProvider<Signer = S>
+               SP::Target: SignerProvider<EcdsaSigner= S>
        {
                let mut signer = signer_provider.derive_channel_signer(
                        self.channel_derivation_parameters.value_satoshis,
@@ -142,8 +143,8 @@ pub enum BumpTransactionEvent {
        /// an empty `pending_htlcs`), confirmation of the commitment transaction can be considered to
        /// be not urgent.
        ///
-       /// [`EcdsaChannelSigner`]: crate::sign::EcdsaChannelSigner
-       /// [`EcdsaChannelSigner::sign_holder_anchor_input`]: crate::sign::EcdsaChannelSigner::sign_holder_anchor_input
+       /// [`EcdsaChannelSigner`]: crate::sign::ecdsa::EcdsaChannelSigner
+       /// [`EcdsaChannelSigner::sign_holder_anchor_input`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_holder_anchor_input
        /// [`build_anchor_input_witness`]: crate::ln::chan_utils::build_anchor_input_witness
        ChannelClose {
                /// The unique identifier for the claim of the anchor output in the commitment transaction.
@@ -196,8 +197,8 @@ pub enum BumpTransactionEvent {
        /// longer able to commit external confirmed funds to the HTLC transaction or the fee committed
        /// to the HTLC transaction is greater in value than the HTLCs being claimed.
        ///
-       /// [`EcdsaChannelSigner`]: crate::sign::EcdsaChannelSigner
-       /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::sign::EcdsaChannelSigner::sign_holder_htlc_transaction
+       /// [`EcdsaChannelSigner`]: crate::sign::ecdsa::EcdsaChannelSigner
+       /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_holder_htlc_transaction
        HTLCResolution {
                /// The unique identifier for the claim of the HTLCs in the confirmed commitment
                /// transaction.
@@ -343,7 +344,10 @@ pub trait CoinSelectionSource {
        ) -> Result<CoinSelection, ()>;
        /// Signs and provides the full witness for all inputs within the transaction known to the
        /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]).
-       fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()>;
+       ///
+       /// If your wallet does not support signing PSBTs, you can call `psbt.extract_tx()` to get the
+       /// unsigned transaction and then sign it with your wallet.
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()>;
 }
 
 /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to
@@ -357,7 +361,10 @@ pub trait WalletSource {
        /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within
        /// the transaction known to the wallet (i.e., any provided via
        /// [`WalletSource::list_confirmed_utxos`]).
-       fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()>;
+       ///
+       /// If your wallet does not support signing PSBTs, you can call `psbt.extract_tx()` to get the
+       /// unsigned transaction and then sign it with your wallet.
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()>;
 }
 
 /// A wrapper over [`WalletSource`] that implements [`CoinSelection`] by preferring UTXOs that would
@@ -504,8 +511,8 @@ where
                        .or_else(|_| do_coin_selection(true, true))
        }
 
-       fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()> {
-               self.source.sign_tx(tx)
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()> {
+               self.source.sign_psbt(psbt)
        }
 }
 
@@ -549,8 +556,8 @@ where
        }
 
        /// Updates a transaction with the result of a successful coin selection attempt.
-       fn process_coin_selection(&self, tx: &mut Transaction, mut coin_selection: CoinSelection) {
-               for utxo in coin_selection.confirmed_utxos.drain(..) {
+       fn process_coin_selection(&self, tx: &mut Transaction, coin_selection: &CoinSelection) {
+               for utxo in coin_selection.confirmed_utxos.iter() {
                        tx.input.push(TxIn {
                                previous_output: utxo.outpoint,
                                script_sig: ScriptBuf::new(),
@@ -558,7 +565,7 @@ where
                                witness: Witness::new(),
                        });
                }
-               if let Some(change_output) = coin_selection.change_output.take() {
+               if let Some(change_output) = coin_selection.change_output.clone() {
                        tx.output.push(change_output);
                } else if tx.output.is_empty() {
                        // We weren't provided a change output, likely because the input set was a perfect
@@ -595,7 +602,7 @@ where
 
                log_debug!(self.logger, "Peforming coin selection for commitment package (commitment and anchor transaction) targeting {} sat/kW",
                        package_target_feerate_sat_per_1000_weight);
-               let coin_selection = self.utxo_source.select_confirmed_utxos(
+               let coin_selection: CoinSelection = self.utxo_source.select_confirmed_utxos(
                        claim_id, must_spend, &[], package_target_feerate_sat_per_1000_weight,
                )?;
 
@@ -613,15 +620,29 @@ where
                let total_input_amount = must_spend_amount +
                        coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value).sum::<u64>();
 
-               self.process_coin_selection(&mut anchor_tx, coin_selection);
+               self.process_coin_selection(&mut anchor_tx, &coin_selection);
                let anchor_txid = anchor_tx.txid();
 
-               debug_assert_eq!(anchor_tx.output.len(), 1);
+               // construct psbt
+               let mut anchor_psbt = PartiallySignedTransaction::from_unsigned_tx(anchor_tx).unwrap();
+               // add witness_utxo to anchor input
+               anchor_psbt.inputs[0].witness_utxo = Some(anchor_descriptor.previous_utxo());
+               // add witness_utxo to remaining inputs
+               for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() {
+                       // add 1 to skip the anchor input
+                       let index = idx + 1;
+                       debug_assert_eq!(anchor_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint);
+                       if utxo.output.script_pubkey.is_witness_program() {
+                               anchor_psbt.inputs[index].witness_utxo = Some(utxo.output);
+                       }
+               }
+
+               debug_assert_eq!(anchor_psbt.unsigned_tx.output.len(), 1);
                #[cfg(debug_assertions)]
-               let unsigned_tx_weight = anchor_tx.weight().to_wu() - (anchor_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
+               let unsigned_tx_weight = anchor_psbt.unsigned_tx.weight().to_wu() - (anchor_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
 
                log_debug!(self.logger, "Signing anchor transaction {}", anchor_txid);
-               anchor_tx = self.utxo_source.sign_tx(anchor_tx)?;
+               anchor_tx = self.utxo_source.sign_psbt(anchor_psbt)?;
 
                let signer = anchor_descriptor.derive_channel_signer(&self.signer_provider);
                let anchor_sig = signer.sign_holder_anchor_input(&anchor_tx, 0, &self.secp)?;
@@ -690,7 +711,7 @@ where
                #[cfg(debug_assertions)]
                let must_spend_amount = must_spend.iter().map(|input| input.previous_utxo.value).sum::<u64>();
 
-               let coin_selection = self.utxo_source.select_confirmed_utxos(
+               let coin_selection: CoinSelection = self.utxo_source.select_confirmed_utxos(
                        claim_id, must_spend, &htlc_tx.output, target_feerate_sat_per_1000_weight,
                )?;
 
@@ -701,13 +722,30 @@ where
                let total_input_amount = must_spend_amount +
                        coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value).sum::<u64>();
 
-               self.process_coin_selection(&mut htlc_tx, coin_selection);
+               self.process_coin_selection(&mut htlc_tx, &coin_selection);
+
+               // construct psbt
+               let mut htlc_psbt = PartiallySignedTransaction::from_unsigned_tx(htlc_tx).unwrap();
+               // add witness_utxo to htlc inputs
+               for (i, htlc_descriptor) in htlc_descriptors.iter().enumerate() {
+                       debug_assert_eq!(htlc_psbt.unsigned_tx.input[i].previous_output, htlc_descriptor.outpoint());
+                       htlc_psbt.inputs[i].witness_utxo = Some(htlc_descriptor.previous_utxo(&self.secp));
+               }
+               // add witness_utxo to remaining inputs
+               for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() {
+                       // offset to skip the htlc inputs
+                       let index = idx + htlc_descriptors.len();
+                       debug_assert_eq!(htlc_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint);
+                       if utxo.output.script_pubkey.is_witness_program() {
+                               htlc_psbt.inputs[index].witness_utxo = Some(utxo.output);
+                       }
+               }
 
                #[cfg(debug_assertions)]
-               let unsigned_tx_weight = htlc_tx.weight().to_wu() - (htlc_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
+               let unsigned_tx_weight = htlc_psbt.unsigned_tx.weight().to_wu() - (htlc_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
 
-               log_debug!(self.logger, "Signing HTLC transaction {}", htlc_tx.txid());
-               htlc_tx = self.utxo_source.sign_tx(htlc_tx)?;
+               log_debug!(self.logger, "Signing HTLC transaction {}", htlc_psbt.unsigned_tx.txid());
+               htlc_tx = self.utxo_source.sign_psbt(htlc_psbt)?;
 
                let mut signers = BTreeMap::new();
                for (idx, htlc_descriptor) in htlc_descriptors.iter().enumerate() {
index adbc7faf7e0f9d0acf1fa3349495733669d41193..76e5f25c0e5f6eef87b2ff48102dd19acc239231 100644 (file)
@@ -72,6 +72,16 @@ pub enum PaymentPurpose {
        SpontaneousPayment(PaymentPreimage),
 }
 
+impl PaymentPurpose {
+       /// Returns the preimage for this payment, if it is known.
+       pub fn preimage(&self) -> Option<PaymentPreimage> {
+               match self {
+                       PaymentPurpose::InvoicePayment { payment_preimage, .. } => *payment_preimage,
+                       PaymentPurpose::SpontaneousPayment(preimage) => Some(*preimage),
+               }
+       }
+}
+
 impl_writeable_tlv_based_enum!(PaymentPurpose,
        (0, InvoicePayment) => {
                (0, payment_preimage, option),
@@ -520,6 +530,25 @@ pub enum Event {
                /// serialized prior to LDK version 0.0.117.
                sender_intended_total_msat: Option<u64>,
        },
+       /// Indicates that a peer connection with a node is needed in order to send an [`OnionMessage`].
+       ///
+       /// Typically, this happens when a [`MessageRouter`] is unable to find a complete path to a
+       /// [`Destination`]. Once a connection is established, any messages buffered by an
+       /// [`OnionMessageHandler`] may be sent.
+       ///
+       /// This event will not be generated for onion message forwards; only for sends including
+       /// replies. Handlers should connect to the node; otherwise, any buffered messages may be lost.
+       ///
+       /// [`OnionMessage`]: msgs::OnionMessage
+       /// [`MessageRouter`]: crate::onion_message::MessageRouter
+       /// [`Destination`]: crate::onion_message::Destination
+       /// [`OnionMessageHandler`]: crate::ln::msgs::OnionMessageHandler
+       ConnectionNeeded {
+               /// The node id for the node needing a connection.
+               node_id: PublicKey,
+               /// Sockets for connecting to the node.
+               addresses: Vec<msgs::SocketAddress>,
+       },
        /// Indicates a request for an invoice failed to yield a response in a reasonable amount of time
        /// or was explicitly abandoned by [`ChannelManager::abandon_payment`]. This may be for an
        /// [`InvoiceRequest`] sent for an [`Offer`] or for a [`Refund`] that hasn't been redeemed.
@@ -1180,6 +1209,10 @@ impl Writeable for Event {
                                        (0, payment_id, required),
                                })
                        },
+                       &Event::ConnectionNeeded { .. } => {
+                               35u8.write(writer)?;
+                               // Never write ConnectionNeeded events as buffered onion messages aren't serialized.
+                       },
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
@@ -1190,8 +1223,7 @@ impl Writeable for Event {
 impl MaybeReadable for Event {
        fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
                match Readable::read(reader)? {
-                       // Note that we do not write a length-prefixed TLV for FundingGenerationReady events,
-                       // unlike all other events, thus we return immediately here.
+                       // Note that we do not write a length-prefixed TLV for FundingGenerationReady events.
                        0u8 => Ok(None),
                        1u8 => {
                                let f = || {
@@ -1578,6 +1610,8 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
+                       // Note that we do not write a length-prefixed TLV for ConnectionNeeded events.
+                       35u8 => Ok(None),
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
                        // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
                        // reads.
index df1bb1a2a2f2ef807f55e72f9bb50f96380e17c7..6eefb3983cca95f1bb0351a43bfddf39d2be58db 100644 (file)
@@ -40,9 +40,8 @@
 #![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), deny(missing_docs))]
 #![cfg_attr(not(any(test, feature = "_test_utils")), forbid(unsafe_code))]
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 // In general, rust is absolutely horrid at supporting users doing things like,
 // for example, compiling Rust code for real environments. Disable useless lints
@@ -70,6 +69,7 @@ extern crate hex;
 #[cfg(any(test, feature = "_test_utils"))] extern crate regex;
 
 #[cfg(not(feature = "std"))] extern crate core2;
+#[cfg(not(feature = "std"))] extern crate libm;
 
 #[cfg(ldk_bench)] extern crate criterion;
 
index e8b6bfd679a43cbd7ec0eb51296d24a5c8dea2cc..9b580d1fa7599df71a70cf3d8819d4402545d5c8 100644 (file)
@@ -7,17 +7,66 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use bitcoin::secp256k1::Secp256k1;
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
 use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
-use crate::events::MessageSendEventsProvider;
+use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
+use crate::events::{HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
+use crate::ln::PaymentSecret;
 use crate::ln::channelmanager;
 use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::ln::features::BlindedHopFeatures;
 use crate::ln::functional_test_utils::*;
+use crate::ln::msgs;
+use crate::ln::msgs::ChannelMessageHandler;
+use crate::ln::onion_utils;
+use crate::ln::onion_utils::INVALID_ONION_BLINDING;
 use crate::ln::outbound_payment::Retry;
 use crate::prelude::*;
-use crate::routing::router::{PaymentParameters, RouteParameters};
+use crate::routing::router::{Payee, PaymentParameters, RouteParameters};
 use crate::util::config::UserConfig;
+use crate::util::test_utils;
+
+pub fn get_blinded_route_parameters(
+       amt_msat: u64, payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
+       channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
+) -> RouteParameters {
+       let mut intermediate_nodes = Vec::new();
+       for (node_id, chan_upd) in node_ids.iter().zip(channel_upds) {
+               intermediate_nodes.push(ForwardNode {
+                       node_id: *node_id,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: chan_upd.short_channel_id,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: chan_upd.cltv_expiry_delta,
+                                       fee_proportional_millionths: chan_upd.fee_proportional_millionths,
+                                       fee_base_msat: chan_upd.fee_base_msat,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: u32::max_value(),
+                                       htlc_minimum_msat: chan_upd.htlc_minimum_msat,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: chan_upd.htlc_maximum_msat,
+               });
+       }
+       let payee_tlvs = ReceiveTlvs {
+               payment_secret,
+               payment_constraints: PaymentConstraints {
+                       max_cltv_expiry: u32::max_value(),
+                       htlc_minimum_msat: channel_upds.last().unwrap().htlc_minimum_msat,
+               },
+       };
+       let mut secp_ctx = Secp256k1::new();
+       let blinded_path = BlindedPath::new_for_payment(
+               &intermediate_nodes[..], *node_ids.last().unwrap(), payee_tlvs,
+               channel_upds.last().unwrap().htlc_maximum_msat, keys_manager, &secp_ctx
+       ).unwrap();
+
+       RouteParameters::from_payment_params_and_value(
+               PaymentParameters::blinded(vec![blinded_path]), amt_msat
+       )
+}
 
 #[test]
 fn one_hop_blinded_path() {
@@ -109,3 +158,545 @@ fn mpp_to_one_hop_blinded_path() {
                Some(payment_secret), ev.clone(), true, None);
        claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
 }
+
+enum ForwardCheckFail {
+       // Fail a check on the inbound onion payload. In this case, we underflow when calculating the
+       // outgoing cltv_expiry.
+       InboundOnionCheck,
+       // The forwarding node's payload is encoded as a receive, i.e. the next hop HMAC is [0; 32].
+       ForwardPayloadEncodedAsReceive,
+       // Fail a check on the outbound channel. In this case, our next-hop peer is offline.
+       OutboundChannelCheck,
+}
+
+#[test]
+fn forward_checks_failure() {
+       do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck);
+       do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive);
+       do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck);
+}
+
+fn do_forward_checks_failure(check: ForwardCheckFail) {
+       // Ensure we'll fail backwards properly if a forwarding check fails on initial update_add
+       // receipt.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       // We need the session priv to construct a bogus onion packet later.
+       *nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+       let amt_msat = 5000;
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       let route = get_route(&nodes[0], &route_params).unwrap();
+       node_cfgs[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       let mut update_add = &mut payment_event.msgs[0];
+       match check {
+               ForwardCheckFail::InboundOnionCheck => {
+                       update_add.cltv_expiry = 10; // causes outbound CLTV expiry to underflow
+               },
+               ForwardCheckFail::ForwardPayloadEncodedAsReceive => {
+                       let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+                       let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+                       let cur_height = nodes[0].best_block_info().1;
+                       let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
+                               &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
+                       // Remove the receive payload so the blinded forward payload is encoded as a final payload
+                       // (i.e. next_hop_hmac == [0; 32])
+                       onion_payloads.pop();
+                       update_add.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
+               },
+               ForwardCheckFail::OutboundChannelCheck => {
+                       // The intro node will see that the next-hop peer is disconnected and fail the HTLC backwards.
+                       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+               },
+       }
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true);
+
+       let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+#[test]
+fn failed_backwards_to_intro_node() {
+       // Ensure the intro node will error backwards properly even if the downstream node did not blind
+       // their error.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+       let amt_msat = 5000;
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(&nodes[1], 1);
+
+       let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       // Ensure the final node fails to handle the HTLC.
+       payment_event.msgs[0].onion_routing_packet.hop_data[0] ^= 1;
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[2], 0);
+       do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true);
+       nodes[2].node.process_pending_htlc_forwards();
+
+       let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0];
+       // Check that the final node encodes its failure correctly.
+       assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+       assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+
+       // Modify it such that the final hop does not correctly blind their error so we can ensure the
+       // intro node converts it to the correct error.
+       update_malformed.sha256_of_onion = [1; 32];
+       nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+       do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false);
+
+       let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+enum ProcessPendingHTLCsCheck {
+       FwdPeerDisconnected,
+       FwdChannelClosed,
+}
+
+#[test]
+fn forward_fail_in_process_pending_htlc_fwds() {
+       do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected);
+       do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed);
+}
+fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck) {
+       // Ensure the intro node will error backwards properly if the HTLC fails in
+       // process_pending_htlc_forwards.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let (chan_upd_1_2, channel_id) = {
+               let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+               (chan.0.contents, chan.2)
+       };
+
+       let amt_msat = 5000;
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       let mut payment_event = SendEvent::from_event(ev);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
+
+       match check {
+               ProcessPendingHTLCsCheck::FwdPeerDisconnected => {
+                       // Disconnect the next-hop peer so when we go to forward in process_pending_htlc_forwards, the
+                       // intro node will error backwards.
+                       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+                       expect_pending_htlcs_forwardable!(nodes[1]);
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
+                               vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id }]);
+               },
+               ProcessPendingHTLCsCheck::FwdChannelClosed => {
+                       // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards,
+                       // the intro node will error backwards.
+                       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+                       let events = nodes[1].node.get_and_clear_pending_events();
+                       match events[0] {
+                               crate::events::Event::PendingHTLCsForwardable { .. } => {},
+                               _ => panic!("Unexpected event {:?}", events),
+                       };
+                       match events[1] {
+                               crate::events::Event::ChannelClosed { .. } => {},
+                               _ => panic!("Unexpected event {:?}", events),
+                       }
+
+                       nodes[1].node.process_pending_htlc_forwards();
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
+                               vec![HTLCDestination::UnknownNextHop { requested_forward_scid: chan_upd_1_2.short_channel_id }]);
+                       check_closed_broadcast(&nodes[1], 1, true);
+                       check_added_monitors!(nodes[1], 1);
+                       nodes[1].node.process_pending_htlc_forwards();
+               },
+       }
+
+       let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       check_added_monitors!(nodes[1], 1);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+#[test]
+fn blinded_intercept_payment() {
+       do_blinded_intercept_payment(true);
+       do_blinded_intercept_payment(false);
+}
+fn do_blinded_intercept_payment(intercept_node_fails: bool) {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let mut intercept_forwards_config = test_default_channel_config();
+       intercept_forwards_config.accept_intercept_htlcs = true;
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let (channel_id, chan_upd) = {
+               let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+               (chan.2, chan.0.contents)
+       };
+
+       let amt_msat = 5000;
+       let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let intercept_scid = nodes[1].node.get_intercept_scid();
+       let mut intercept_chan_upd = chan_upd;
+       intercept_chan_upd.short_channel_id = intercept_scid;
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&intercept_chan_upd],
+               &chanmon_cfgs[2].keys_manager);
+
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(),
+       PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+       let payment_event = {
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               SendEvent::from_event(events.remove(0))
+       };
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
+
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       let (intercept_id, expected_outbound_amount_msat) = match events[0] {
+               crate::events::Event::HTLCIntercepted {
+                       intercept_id, payment_hash: pmt_hash,
+                       requested_next_hop_scid: short_channel_id, expected_outbound_amount_msat, ..
+               } => {
+                       assert_eq!(pmt_hash, payment_hash);
+                       assert_eq!(short_channel_id, intercept_scid);
+                       (intercept_id, expected_outbound_amount_msat)
+               },
+               _ => panic!()
+       };
+
+       if intercept_node_fails {
+               nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]);
+               nodes[1].node.process_pending_htlc_forwards();
+               let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               check_added_monitors!(&nodes[1], 1);
+               assert!(update_fail.update_fail_htlcs.len() == 1);
+               let fail_msg = update_fail.update_fail_htlcs[0].clone();
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
+               commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false);
+               expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+                       PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+               return
+       }
+
+       nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap();
+       expect_pending_htlcs_forwardable!(nodes[1]);
+
+       let payment_event = {
+               {
+                       let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+                       assert_eq!(added_monitors.len(), 1);
+                       added_monitors.clear();
+               }
+               let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               SendEvent::from_event(events.remove(0))
+       };
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+       commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true);
+       expect_pending_htlcs_forwardable!(nodes[2]);
+
+       expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
+       do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[1], &nodes[2])[..]), false, payment_preimage);
+       expect_payment_sent(&nodes[0], payment_preimage, Some(Some(1000)), true, true);
+}
+
+#[test]
+fn two_hop_blinded_path_success() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+       let amt_msat = 5000;
+       let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+       pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt_msat, payment_hash, payment_secret);
+       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+}
+
+#[derive(PartialEq)]
+enum ReceiveCheckFail {
+       // The recipient fails the payment upon `PaymentClaimable`.
+       RecipientFail,
+       // Failure to decode the recipient's onion payload.
+       OnionDecodeFail,
+       // The incoming HTLC did not satisfy our requirements; in this case it underpaid us according to
+       // the expected receive amount in the onion.
+       ReceiveRequirements,
+       // The incoming HTLC errors when added to the Channel, in this case due to the HTLC being
+       // delivered out-of-order with a shutdown message.
+       ChannelCheck,
+       // The HTLC is successfully added to the inbound channel but fails receive checks in
+       // process_pending_htlc_forwards.
+       ProcessPendingHTLCsCheck,
+       // The HTLC violates the `PaymentConstraints` contained within the receiver's encrypted payload.
+       PaymentConstraints,
+}
+
+#[test]
+fn multi_hop_receiver_fail() {
+       do_multi_hop_receiver_fail(ReceiveCheckFail::RecipientFail);
+       do_multi_hop_receiver_fail(ReceiveCheckFail::OnionDecodeFail);
+       do_multi_hop_receiver_fail(ReceiveCheckFail::ReceiveRequirements);
+       do_multi_hop_receiver_fail(ReceiveCheckFail::ChannelCheck);
+       do_multi_hop_receiver_fail(ReceiveCheckFail::ProcessPendingHTLCsCheck);
+       do_multi_hop_receiver_fail(ReceiveCheckFail::PaymentConstraints);
+}
+
+fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
+       // Test that the receiver to a multihop blinded path fails back correctly.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       // We need the session priv to construct an invalid onion packet later.
+       let session_priv = [3; 32];
+       *nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some(session_priv);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       let (chan_upd_1_2, chan_id_1_2) = {
+               let (chan_upd, _, channel_id, ..) = create_announced_chan_between_nodes_with_value(
+                       &nodes, 1, 2, 1_000_000, 0
+               );
+               (chan_upd.contents, channel_id)
+       };
+
+       let amt_msat = 5000;
+       let final_cltv_delta = if check == ReceiveCheckFail::ProcessPendingHTLCsCheck {
+               // Set the final CLTV expiry too low to trigger the failure in process_pending_htlc_forwards.
+               Some(TEST_FINAL_CLTV as u16 - 2)
+       } else { None };
+       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), final_cltv_delta);
+       let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+               &chanmon_cfgs[2].keys_manager);
+
+       let route = if check == ReceiveCheckFail::ProcessPendingHTLCsCheck {
+               let mut route = get_route(&nodes[0], &route_params).unwrap();
+               // Set the final CLTV expiry too low to trigger the failure in process_pending_htlc_forwards.
+               route.paths[0].blinded_tail.as_mut().map(|bt| bt.excess_final_cltv_expiry_delta = TEST_FINAL_CLTV - 2);
+               route
+       } else if check == ReceiveCheckFail::PaymentConstraints {
+               // Create a blinded path where the receiver's encrypted payload has an htlc_minimum_msat that is
+               // violated by `amt_msat`, and stick it in the route_params without changing the corresponding
+               // BlindedPayInfo (to ensure pathfinding still succeeds).
+               let high_htlc_min_bp = {
+                       let mut high_htlc_minimum_upd = chan_upd_1_2.clone();
+                       high_htlc_minimum_upd.htlc_minimum_msat = amt_msat + 1000;
+                       let high_htlc_min_params = get_blinded_route_parameters(amt_msat, payment_secret,
+                               nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&high_htlc_minimum_upd],
+                               &chanmon_cfgs[2].keys_manager);
+                       if let Payee::Blinded { route_hints, .. } = high_htlc_min_params.payment_params.payee {
+                               route_hints[0].1.clone()
+                       } else { panic!() }
+               };
+               if let Payee::Blinded { ref mut route_hints, .. } = route_params.payment_params.payee {
+                       route_hints[0].1 = high_htlc_min_bp;
+               } else { panic!() }
+               find_route(&nodes[0], &route_params).unwrap()
+       } else {
+               find_route(&nodes[0], &route_params).unwrap()
+       };
+       node_cfgs[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+
+       let mut payment_event_0_1 = {
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+               SendEvent::from_event(ev)
+       };
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(&nodes[1], 1);
+
+       let mut payment_event_1_2 = {
+               let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+               SendEvent::from_event(ev)
+       };
+
+       match check {
+               ReceiveCheckFail::RecipientFail => {
+                       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+                       check_added_monitors!(nodes[2], 0);
+                       do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+                       expect_pending_htlcs_forwardable!(nodes[2]);
+                       check_payment_claimable(
+                               &nodes[2].node.get_and_clear_pending_events()[0], payment_hash, payment_secret, amt_msat,
+                               None, nodes[2].node.get_our_node_id()
+                       );
+                       nodes[2].node.fail_htlc_backwards(&payment_hash);
+                       expect_pending_htlcs_forwardable_conditions(
+                               nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]
+                       );
+                       nodes[2].node.process_pending_htlc_forwards();
+                       check_added_monitors!(nodes[2], 1);
+               },
+               ReceiveCheckFail::OnionDecodeFail => {
+                       let session_priv = SecretKey::from_slice(&session_priv).unwrap();
+                       let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+                       let cur_height = nodes[0].best_block_info().1;
+                       let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
+                               &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
+
+                       let update_add = &mut payment_event_1_2.msgs[0];
+                       onion_payloads.last_mut().map(|p| {
+                               if let msgs::OutboundOnionPayload::BlindedReceive { ref mut intro_node_blinding_point, .. } = p {
+                                       // The receiver should error if both the update_add blinding_point and the
+                                       // intro_node_blinding_point are set.
+                                       assert!(intro_node_blinding_point.is_none() && update_add.blinding_point.is_some());
+                                       *intro_node_blinding_point = Some(PublicKey::from_slice(&[2; 33]).unwrap());
+                               } else { panic!() }
+                       });
+                       update_add.onion_routing_packet = onion_utils::construct_onion_packet(
+                               vec![onion_payloads.pop().unwrap()], vec![onion_keys.pop().unwrap()], [0; 32],
+                               &payment_hash
+                       ).unwrap();
+                       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
+                       check_added_monitors!(nodes[2], 0);
+                       do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+               },
+               ReceiveCheckFail::ReceiveRequirements => {
+                       let update_add = &mut payment_event_1_2.msgs[0];
+                       update_add.amount_msat -= 1;
+                       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
+                       check_added_monitors!(nodes[2], 0);
+                       do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+               },
+               ReceiveCheckFail::ChannelCheck => {
+                       nodes[2].node.close_channel(&chan_id_1_2, &nodes[1].node.get_our_node_id()).unwrap();
+                       let node_2_shutdown = get_event_msg!(nodes[2], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+                       nodes[1].node.handle_shutdown(&nodes[2].node.get_our_node_id(), &node_2_shutdown);
+                       let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
+
+                       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+                       nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg);
+                       check_added_monitors!(nodes[2], 1);
+
+                       nodes[2].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+                       commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false);
+               },
+               ReceiveCheckFail::ProcessPendingHTLCsCheck => {
+                       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+                       check_added_monitors!(nodes[2], 0);
+                       do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+                       expect_pending_htlcs_forwardable!(nodes[2]);
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[2],
+                               vec![HTLCDestination::FailedPayment { payment_hash }]);
+                       check_added_monitors!(nodes[2], 1);
+               },
+               ReceiveCheckFail::PaymentConstraints => {
+                       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+                       check_added_monitors!(nodes[2], 0);
+                       do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+               }
+       }
+
+       let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1);
+       let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0];
+       assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+       assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+       nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+       do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false);
+
+       let updates_1_0 = if check == ReceiveCheckFail::ChannelCheck {
+               let events = nodes[1].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 2);
+               events.into_iter().find_map(|ev| {
+                       match ev {
+                               MessageSendEvent:: UpdateHTLCs { node_id, updates } => {
+                                       assert_eq!(node_id, nodes[0].node.get_our_node_id());
+                                       return Some(updates)
+                               },
+                               MessageSendEvent::SendClosingSigned { .. } => None,
+                               _ => panic!()
+                       }
+               }).unwrap()
+       } else { get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()) };
+       assert_eq!(updates_1_0.update_fail_htlcs.len(), 1);
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false);
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+               PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
index 3552748b31c0955bfe35e8302fcfae71ffd267b8..672e3aa862e460e94f26451ebff4ff39293eb4e9 100644 (file)
@@ -485,9 +485,11 @@ impl TxCreationKeys {
 }
 
 /// The maximum length of a script returned by get_revokeable_redeemscript.
-// Calculated as 6 bytes of opcodes, 1 byte push plus 2 bytes for contest_delay, and two public
-// keys of 33 bytes (+ 1 push).
-pub const REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH: usize = 6 + 3 + 34*2;
+// Calculated as 6 bytes of opcodes, 1 byte push plus 3 bytes for contest_delay, and two public
+// keys of 33 bytes (+ 1 push). Generally, pushes are only 2 bytes (for values below 0x7fff, i.e.
+// around 7 months), however, a 7 month contest delay shouldn't result in being unable to reclaim
+// on-chain funds.
+pub const REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH: usize = 6 + 4 + 34*2;
 
 /// A script either spendable by the revocation
 /// key or the broadcaster_delayed_payment_key and satisfying the relative-locktime OP_CSV constraint.
index 469eddd642f68686765896826a2621bf03c154f6..cd98418c5204d369759637fc88667b6d4255dca5 100644 (file)
@@ -37,11 +37,12 @@ use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
 use crate::events::ClosureReason;
 use crate::routing::gossip::NodeId;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, Record, WithContext};
 use crate::util::errors::APIError;
 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
 use crate::util::scid_utils::scid_from_parts;
@@ -165,6 +166,7 @@ struct InboundHTLCOutput {
        state: InboundHTLCState,
 }
 
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum OutboundHTLCState {
        /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
        /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
@@ -198,6 +200,7 @@ enum OutboundHTLCState {
 }
 
 #[derive(Clone)]
+#[cfg_attr(test, derive(Debug, PartialEq))]
 enum OutboundHTLCOutcome {
        /// LDK version 0.0.105+ will always fill in the preimage here.
        Success(Option<PaymentPreimage>),
@@ -222,6 +225,7 @@ impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
        }
 }
 
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 struct OutboundHTLCOutput {
        htlc_id: u64,
        amount_msat: u64,
@@ -229,10 +233,12 @@ struct OutboundHTLCOutput {
        payment_hash: PaymentHash,
        state: OutboundHTLCState,
        source: HTLCSource,
+       blinding_point: Option<PublicKey>,
        skimmed_fee_msat: Option<u64>,
 }
 
 /// See AwaitingRemoteRevoke ChannelState for more info
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum HTLCUpdateAwaitingACK {
        AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
                // always outbound
@@ -243,6 +249,7 @@ enum HTLCUpdateAwaitingACK {
                onion_routing_packet: msgs::OnionPacket,
                // The extra fee we're skimming off the top of this HTLC.
                skimmed_fee_msat: Option<u64>,
+               blinding_point: Option<PublicKey>,
        },
        ClaimHTLC {
                payment_preimage: PaymentPreimage,
@@ -252,78 +259,300 @@ enum HTLCUpdateAwaitingACK {
                htlc_id: u64,
                err_packet: msgs::OnionErrorPacket,
        },
+       FailMalformedHTLC {
+               htlc_id: u64,
+               failure_code: u16,
+               sha256_of_onion: [u8; 32],
+       },
+}
+
+macro_rules! define_state_flags {
+       ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
+               #[doc = $flag_type_doc]
+               #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
+               struct $flag_type(u32);
+
+               impl $flag_type {
+                       $(
+                               #[doc = $flag_doc]
+                               const $flag: $flag_type = $flag_type($value);
+                       )*
+
+                       /// All flags that apply to the specified [`ChannelState`] variant.
+                       #[allow(unused)]
+                       const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
+
+                       #[allow(unused)]
+                       fn new() -> Self { Self(0) }
+
+                       #[allow(unused)]
+                       fn from_u32(flags: u32) -> Result<Self, ()> {
+                               if flags & !Self::ALL.0 != 0 {
+                                       Err(())
+                               } else {
+                                       Ok($flag_type(flags))
+                               }
+                       }
+
+                       #[allow(unused)]
+                       fn is_empty(&self) -> bool { self.0 == 0 }
+
+                       #[allow(unused)]
+                       fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+               }
+
+               impl core::ops::Not for $flag_type {
+                       type Output = Self;
+                       fn not(self) -> Self::Output { Self(!self.0) }
+               }
+               impl core::ops::BitOr for $flag_type {
+                       type Output = Self;
+                       fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
+               }
+               impl core::ops::BitOrAssign for $flag_type {
+                       fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
+               }
+               impl core::ops::BitAnd for $flag_type {
+                       type Output = Self;
+                       fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
+               }
+               impl core::ops::BitAndAssign for $flag_type {
+                       fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
+               }
+       };
+       ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
+               define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
+       };
+       ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
+               define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+               impl core::ops::BitOr<FundedStateFlags> for $flag_type {
+                       type Output = Self;
+                       fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
+               }
+               impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
+                       fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
+               }
+               impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
+                       type Output = Self;
+                       fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
+               }
+               impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
+                       fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
+               }
+               impl PartialEq<FundedStateFlags> for $flag_type {
+                       fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
+               }
+               impl From<FundedStateFlags> for $flag_type {
+                       fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
+               }
+       };
+}
+
+/// We declare all the states/flags here together to help determine which bits are still available
+/// to choose.
+mod state_flags {
+       pub const OUR_INIT_SENT: u32 = 1 << 0;
+       pub const THEIR_INIT_SENT: u32 = 1 << 1;
+       pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
+       pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
+       pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
+       pub const OUR_CHANNEL_READY: u32 = 1 << 5;
+       pub const CHANNEL_READY: u32 = 1 << 6;
+       pub const PEER_DISCONNECTED: u32 = 1 << 7;
+       pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
+       pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
+       pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
+       pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
+       pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
+       pub const WAITING_FOR_BATCH: u32 = 1 << 13;
 }
 
-/// There are a few "states" and then a number of flags which can be applied:
-/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
-/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
-/// move on to `ChannelReady`.
-/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
-/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
-/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
+define_state_flags!(
+       "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
+       FundedStateFlags, [
+               ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
+                       until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
+               ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
+                       somewhere and we should pause sending any outbound messages until they've managed to \
+                       complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
+               ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
+                       any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
+                       message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
+               ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
+                       the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
+       ]
+);
+
+define_state_flags!(
+       "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
+       NegotiatingFundingFlags, [
+               ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
+                       OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
+               ("Indicates we have received their `open_channel`/`accept_channel` message.",
+                       THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
+       ]
+);
+
+define_state_flags!(
+       "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
+       FUNDED_STATE, AwaitingChannelReadyFlags, [
+               ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+                       `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+                       THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
+               ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
+                       `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
+                       OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
+               ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
+                       is being held until all channels in the batch have received `funding_signed` and have \
+                       their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
+       ]
+);
+
+define_state_flags!(
+       "Flags that only apply to [`ChannelState::ChannelReady`].",
+       FUNDED_STATE, ChannelReadyFlags, [
+               ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
+                       `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
+                       messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
+                       implicit ACK, so instead we have to hold them away temporarily to be sent later.",
+                       AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
+       ]
+);
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
 enum ChannelState {
-       /// Implies we have (or are prepared to) send our open_channel/accept_channel message
-       OurInitSent = 1 << 0,
-       /// Implies we have received their `open_channel`/`accept_channel` message
-       TheirInitSent = 1 << 1,
-       /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
-       /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
-       /// upon receipt of `funding_created`, so simply skip this state.
-       FundingCreated = 4,
-       /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
-       /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
-       /// and our counterparty consider the funding transaction confirmed.
-       FundingSent = 8,
-       /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
-       /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
-       TheirChannelReady = 1 << 4,
-       /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
-       /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
-       OurChannelReady = 1 << 5,
-       ChannelReady = 64,
-       /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
-       /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
-       /// dance.
-       PeerDisconnected = 1 << 7,
-       /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
-       /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
-       /// sending any outbound messages until they've managed to finish.
-       MonitorUpdateInProgress = 1 << 8,
-       /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
-       /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
-       /// messages as then we will be unable to determine which HTLCs they included in their
-       /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
-       /// later.
-       /// Flag is set on `ChannelReady`.
-       AwaitingRemoteRevoke = 1 << 9,
-       /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
-       /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
-       /// to respond with our own shutdown message when possible.
-       RemoteShutdownSent = 1 << 10,
-       /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
-       /// point, we may not add any new HTLCs to the channel.
-       LocalShutdownSent = 1 << 11,
-       /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
-       /// to drop us, but we store this anyway.
-       ShutdownComplete = 4096,
-       /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
-       /// broadcasting of the funding transaction is being held until all channels in the batch
-       /// have received funding_signed and have their monitors persisted.
-       WaitingForBatch = 1 << 13,
+       /// We are negotiating the parameters required for the channel prior to funding it.
+       NegotiatingFunding(NegotiatingFundingFlags),
+       /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
+       /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
+       /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
+       FundingNegotiated,
+       /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
+       /// funding transaction to confirm.
+       AwaitingChannelReady(AwaitingChannelReadyFlags),
+       /// Both we and our counterparty consider the funding transaction confirmed and the channel is
+       /// now operational.
+       ChannelReady(ChannelReadyFlags),
+       /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
+       /// is about to drop us, but we store this anyway.
+       ShutdownComplete,
+}
+
+macro_rules! impl_state_flag {
+       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
+               #[allow(unused)]
+               fn $get(&self) -> bool {
+                       match self {
+                               $(
+                                       ChannelState::$state(flags) => flags.is_set($state_flag.into()),
+                               )*
+                               _ => false,
+                       }
+               }
+               #[allow(unused)]
+               fn $set(&mut self) {
+                       match self {
+                               $(
+                                       ChannelState::$state(flags) => *flags |= $state_flag,
+                               )*
+                               _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
+                       }
+               }
+               #[allow(unused)]
+               fn $clear(&mut self) {
+                       match self {
+                               $(
+                                       ChannelState::$state(flags) => *flags &= !($state_flag),
+                               )*
+                               _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
+                       }
+               }
+       };
+       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
+               impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
+       };
+       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
+               impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
+       };
+}
+
+impl ChannelState {
+       fn from_u32(state: u32) -> Result<Self, ()> {
+               match state {
+                       state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
+                       state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
+                       val => {
+                               if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
+                                       AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
+                                               .map(|flags| ChannelState::AwaitingChannelReady(flags))
+                               } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
+                                       ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
+                                               .map(|flags| ChannelState::ChannelReady(flags))
+                               } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
+                                       Ok(ChannelState::NegotiatingFunding(flags))
+                               } else {
+                                       Err(())
+                               }
+                       },
+               }
+       }
+
+       fn to_u32(&self) -> u32 {
+               match self {
+                       ChannelState::NegotiatingFunding(flags) => flags.0,
+                       ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
+                       ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
+                       ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
+                       ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
+               }
+       }
+
+       fn is_pre_funded_state(&self) -> bool {
+               matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
+       }
+
+       fn is_both_sides_shutdown(&self) -> bool {
+               self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
+       }
+
+       fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
+               match self {
+                       ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+                       ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
+                       _ => FundedStateFlags::new(),
+               }
+       }
+
+       fn should_force_holding_cell(&self) -> bool {
+               match self {
+                       ChannelState::ChannelReady(flags) =>
+                               flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
+                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
+                                       flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+                       _ => {
+                               debug_assert!(false, "The holding cell is only valid within ChannelReady");
+                               false
+                       },
+               }
+       }
+
+       impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
+               FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
+       impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
+               FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
+       impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
+               FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
+       impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
+               FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
+       impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
+               AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
+       impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
+               AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
+       impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
+               AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
+       impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
+               ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
 }
-const BOTH_SIDES_SHUTDOWN_MASK: u32 =
-       ChannelState::LocalShutdownSent as u32 |
-       ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 =
-       BOTH_SIDES_SHUTDOWN_MASK |
-       ChannelState::PeerDisconnected as u32 |
-       ChannelState::MonitorUpdateInProgress as u32;
-const STATE_FLAGS: u32 =
-       MULTI_STATE_FLAGS |
-       ChannelState::TheirChannelReady as u32 |
-       ChannelState::OurChannelReady as u32 |
-       ChannelState::AwaitingRemoteRevoke as u32 |
-       ChannelState::WaitingForBatch as u32;
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
@@ -407,6 +636,33 @@ impl fmt::Display for ChannelError {
        }
 }
 
+pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
+       pub logger: &'a L,
+       pub peer_id: Option<PublicKey>,
+       pub channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               record.peer_id = self.peer_id;
+               record.channel_id = self.channel_id;
+               self.logger.log(record)
+       }
+}
+
+impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
+where L::Target: Logger {
+       pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
+       where S::Target: SignerProvider
+       {
+               WithChannelContext {
+                       logger,
+                       peer_id: Some(context.counterparty_node_id),
+                       channel_id: Some(context.channel_id),
+               }
+       }
+}
+
 macro_rules! secp_check {
        ($res: expr, $err: expr) => {
                match $res {
@@ -476,9 +732,10 @@ struct CommitmentStats<'a> {
        total_fee_sat: u64, // the total fee included in the transaction
        num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
        htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
-       local_balance_msat: u64, // local balance before fees but considering dust limits
-       remote_balance_msat: u64, // remote balance before fees but considering dust limits
-       preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+       local_balance_msat: u64, // local balance before fees *not* considering dust limits
+       remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
+       outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
+       inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
 }
 
 /// Used when calculating whether we or the remote can afford an additional HTLC.
@@ -541,7 +798,6 @@ pub(super) struct MonitorRestoreUpdates {
 pub(super) struct SignerResumeUpdates {
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub funding_signed: Option<msgs::FundingSigned>,
-       pub funding_created: Option<msgs::FundingCreated>,
        pub channel_ready: Option<msgs::ChannelReady>,
 }
 
@@ -565,6 +821,8 @@ pub(crate) struct ShutdownResult {
        /// An unbroadcasted batch funding transaction id. The closure of this channel should be
        /// propagated to the remainder of the batch.
        pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
+       pub(crate) channel_id: ChannelId,
+       pub(crate) counterparty_node_id: PublicKey,
 }
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -647,7 +905,7 @@ pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
 
 impl<'a, SP: Deref> ChannelPhase<SP> where
        SP::Target: SignerProvider,
-       <SP::Target as SignerProvider>::Signer: ChannelSigner,
+       <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
 {
        pub fn context(&'a self) -> &'a ChannelContext<SP> {
                match self {
@@ -706,7 +964,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
        /// Will be `None` for channels created prior to 0.0.115.
        temporary_channel_id: Option<ChannelId>,
-       channel_state: u32,
+       channel_state: ChannelState,
 
        // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
        // our peer. However, we want to make sure they received it, or else rebroadcast it when we
@@ -725,7 +983,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
        latest_monitor_update_id: u64,
 
-       holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
+       holder_signer: ChannelSignerType<SP>,
        shutdown_scriptpubkey: Option<ShutdownScript>,
        destination_script: ScriptBuf,
 
@@ -1003,49 +1261,55 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns true if we've ever received a message from the remote end for this Channel
        pub fn have_received_message(&self) -> bool {
-               self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
+               self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
        }
 
        /// Returns true if this channel is fully established and not known to be closing.
        /// Allowed in any state (including after shutdown)
        pub fn is_usable(&self) -> bool {
-               let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
-               (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
+               matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
+                       !self.channel_state.is_local_shutdown_sent() &&
+                       !self.channel_state.is_remote_shutdown_sent() &&
+                       !self.monitor_pending_channel_ready
        }
 
        /// shutdown state returns the state of the channel in its various stages of shutdown
        pub fn shutdown_state(&self) -> ChannelShutdownState {
-               if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
-                       return ChannelShutdownState::ShutdownComplete;
-               }
-               if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 &&  self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
-                       return ChannelShutdownState::ShutdownInitiated;
-               }
-               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
-                       return ChannelShutdownState::ResolvingHTLCs;
-               }
-               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
-                       return ChannelShutdownState::NegotiatingClosingFee;
+               match self.channel_state {
+                       ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
+                               if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
+                                       ChannelShutdownState::ShutdownInitiated
+                               } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
+                                       ChannelShutdownState::ResolvingHTLCs
+                               } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
+                                       ChannelShutdownState::NegotiatingClosingFee
+                               } else {
+                                       ChannelShutdownState::NotShuttingDown
+                               },
+                       ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
+                       _ => ChannelShutdownState::NotShuttingDown,
                }
-               return ChannelShutdownState::NotShuttingDown;
        }
 
        fn closing_negotiation_ready(&self) -> bool {
+               let is_ready_to_close = match self.channel_state {
+                       ChannelState::AwaitingChannelReady(flags) =>
+                               flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+                       ChannelState::ChannelReady(flags) =>
+                               flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+                       _ => false,
+               };
                self.pending_inbound_htlcs.is_empty() &&
-               self.pending_outbound_htlcs.is_empty() &&
-               self.pending_update_fee.is_none() &&
-               self.channel_state &
-               (BOTH_SIDES_SHUTDOWN_MASK |
-                       ChannelState::AwaitingRemoteRevoke as u32 |
-                       ChannelState::PeerDisconnected as u32 |
-                       ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+                       self.pending_outbound_htlcs.is_empty() &&
+                       self.pending_update_fee.is_none() &&
+                       is_ready_to_close
        }
 
        /// Returns true if this channel is currently available for use. This is a superset of
        /// is_usable() and considers things like the channel being temporarily disabled.
        /// Allowed in any state (including after shutdown)
        pub fn is_live(&self) -> bool {
-               self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
+               self.is_usable() && !self.channel_state.is_peer_disconnected()
        }
 
        // Public utilities:
@@ -1095,7 +1359,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns the holder signer for this channel.
        #[cfg(test)]
-       pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
+       pub fn get_signer(&self) -> &ChannelSignerType<SP> {
                return &self.holder_signer
        }
 
@@ -1297,8 +1561,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// Returns true if funding_signed was sent/received and the
        /// funding transaction has been broadcast if necessary.
        pub fn is_funding_broadcast(&self) -> bool {
-               self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
-                       self.channel_state & ChannelState::WaitingForBatch as u32 == 0
+               !self.channel_state.is_pre_funded_state() &&
+                       !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
        }
 
        /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
@@ -1392,6 +1656,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        }
                }
 
+               let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
+
                for ref htlc in self.pending_inbound_htlcs.iter() {
                        let (include, state_name) = match htlc.state {
                                InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
@@ -1409,7 +1675,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                match &htlc.state {
                                        &InboundHTLCState::LocalRemoved(ref reason) => {
                                                if generated_by_local {
-                                                       if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+                                                       if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
+                                                               inbound_htlc_preimages.push(preimage);
                                                                value_to_self_msat_offset += htlc.amount_msat as i64;
                                                        }
                                                }
@@ -1419,7 +1686,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        }
                }
 
-               let mut preimages: Vec<PaymentPreimage> = Vec::new();
+
+               let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
 
                for ref htlc in self.pending_outbound_htlcs.iter() {
                        let (include, state_name) = match htlc.state {
@@ -1438,7 +1706,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        };
 
                        if let Some(preimage) = preimage_opt {
-                               preimages.push(preimage);
+                               outbound_htlc_preimages.push(preimage);
                        }
 
                        if include {
@@ -1460,13 +1728,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        }
                }
 
-               let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
+               let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
                assert!(value_to_self_msat >= 0);
                // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
                // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
                // "violate" their reserve value by couting those against it. Thus, we have to convert
                // everything to i64 before subtracting as otherwise we can overflow.
-               let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
+               let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
                assert!(value_to_remote_msat >= 0);
 
                #[cfg(debug_assertions)]
@@ -1532,10 +1800,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
                htlcs_included.append(&mut included_dust_htlcs);
 
-               // For the stats, trimmed-to-0 the value in msats accordingly
-               value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
-               value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
-
                CommitmentStats {
                        tx,
                        feerate_per_kw,
@@ -1544,7 +1808,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        htlcs_included,
                        local_balance_msat: value_to_self_msat as u64,
                        remote_balance_msat: value_to_remote_msat as u64,
-                       preimages
+                       inbound_htlc_preimages,
+                       outbound_htlc_preimages,
                }
        }
 
@@ -1605,7 +1870,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                if let Some(feerate) = outbound_feerate_update {
                        feerate_per_kw = cmp::max(feerate_per_kw, feerate);
                }
-               cmp::max(2530, feerate_per_kw * 1250 / 1000)
+               let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
+               cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
        }
 
        /// Get forwarding information for the counterparty.
@@ -2047,11 +2313,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
                where F: Fn() -> Option<O> {
-               if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
-                  self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
-                       f()
-               } else {
-                       None
+               match self.channel_state {
+                       ChannelState::FundingNegotiated => f(),
+                       ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
+                               f()
+                       } else {
+                               None
+                       },
+                       _ => None,
                }
        }
 
@@ -2090,7 +2359,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                // called during initialization prior to the chain_monitor in the encompassing ChannelManager
                // being fully configured in some cases. Thus, its likely any monitor events we generate will
                // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
-               assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
+               assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
 
                // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
                // return them to fail the payment.
@@ -2105,61 +2374,39 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        }
                }
                let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
-                       // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
+                       // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
                        // returning a channel monitor update here would imply a channel monitor update before
                        // we even registered the channel monitor to begin with, which is invalid.
                        // Thus, if we aren't actually at a point where we could conceivably broadcast the
                        // funding transaction, don't return a funding txo (which prevents providing the
                        // monitor update to the user, even if we return one).
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-                       if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+                       let generate_monitor_update = match self.channel_state {
+                               ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
+                               _ => false,
+                       };
+                       if generate_monitor_update {
                                self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
                                Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
+                                       counterparty_node_id: Some(self.counterparty_node_id),
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
                                }))
                        } else { None }
                } else { None };
                let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
 
-               self.channel_state = ChannelState::ShutdownComplete as u32;
+               self.channel_state = ChannelState::ShutdownComplete;
                self.update_time_counter += 1;
                ShutdownResult {
                        monitor_update,
                        dropped_outbound_htlcs,
                        unbroadcasted_batch_funding_txid,
+                       channel_id: self.channel_id,
+                       counterparty_node_id: self.counterparty_node_id,
                }
        }
 
-       /// Only allowed after [`Self::channel_transaction_parameters`] is set.
-       fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
-               let counterparty_keys = self.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-               let signature = match &self.holder_signer {
-                       // TODO (taproot|arik): move match into calling method for Taproot
-                       ChannelSignerType::Ecdsa(ecdsa) => {
-                               ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
-                                       .map(|(sig, _)| sig).ok()?
-                       }
-               };
-
-               if self.signer_pending_funding {
-                       log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
-                       self.signer_pending_funding = false;
-               }
-
-               Some(msgs::FundingCreated {
-                       temporary_channel_id: self.temporary_channel_id.unwrap(),
-                       funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
-                       funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
-                       signature,
-                       #[cfg(taproot)]
-                       partial_signature_with_nonce: None,
-                       #[cfg(taproot)]
-                       next_local_nonce: None,
-               })
-       }
-
        /// Only allowed after [`Self::channel_transaction_parameters`] is set.
        fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
                let counterparty_keys = self.build_remote_transaction_keys();
@@ -2173,7 +2420,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                match &self.holder_signer {
                        // TODO (arik): move match into calling method for Taproot
                        ChannelSignerType::Ecdsa(ecdsa) => {
-                               let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
+                               let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
                                        .map(|(signature, _)| msgs::FundingSigned {
                                                channel_id: self.channel_id(),
                                                signature,
@@ -2183,8 +2430,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                        .ok();
 
                                if funding_signed.is_none() {
-                                       log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
-                                       self.signer_pending_funding = true;
+                                       #[cfg(not(async_signing))] {
+                                               panic!("Failed to get signature for funding_signed");
+                                       }
+                                       #[cfg(async_signing)] {
+                                               log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+                                               self.signer_pending_funding = true;
+                                       }
                                } else if self.signer_pending_funding {
                                        log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
                                        self.signer_pending_funding = false;
@@ -2192,7 +2444,10 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                                // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
                                (counterparty_initial_commitment_tx, funding_signed)
-                       }
+                       },
+                       // TODO (taproot|arik)
+                       #[cfg(taproot)]
+                       _ => todo!()
                }
        }
 }
@@ -2269,9 +2524,67 @@ struct CommitmentTxInfoCached {
        feerate: u32,
 }
 
+/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
+/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
+trait FailHTLCContents {
+       type Message: FailHTLCMessageName;
+       fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
+       fn to_inbound_htlc_state(self) -> InboundHTLCState;
+       fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
+}
+impl FailHTLCContents for msgs::OnionErrorPacket {
+       type Message = msgs::UpdateFailHTLC;
+       fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+               msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
+       }
+       fn to_inbound_htlc_state(self) -> InboundHTLCState {
+               InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
+       }
+       fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+               HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
+       }
+}
+impl FailHTLCContents for (u16, [u8; 32]) {
+       type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
+       fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+               msgs::UpdateFailMalformedHTLC {
+                       htlc_id,
+                       channel_id,
+                       failure_code: self.0,
+                       sha256_of_onion: self.1
+               }
+       }
+       fn to_inbound_htlc_state(self) -> InboundHTLCState {
+               InboundHTLCState::LocalRemoved(
+                       InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
+               )
+       }
+       fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+               HTLCUpdateAwaitingACK::FailMalformedHTLC {
+                       htlc_id,
+                       failure_code: self.0,
+                       sha256_of_onion: self.1
+               }
+       }
+}
+
+trait FailHTLCMessageName {
+       fn name() -> &'static str;
+}
+impl FailHTLCMessageName for msgs::UpdateFailHTLC {
+       fn name() -> &'static str {
+               "update_fail_htlc"
+       }
+}
+impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
+       fn name() -> &'static str {
+               "update_fail_malformed_htlc"
+       }
+}
+
 impl<SP: Deref> Channel<SP> where
        SP::Target: SignerProvider,
-       <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
+       <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
 {
        fn check_remote_fee<F: Deref, L: Deref>(
                channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
@@ -2385,7 +2698,7 @@ impl<SP: Deref> Channel<SP> where
        where L::Target: Logger {
                // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
                // (see equivalent if condition there).
-               assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+               assert!(self.context.channel_state.should_force_holding_cell());
                let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
                let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
                self.context.latest_monitor_update_id = mon_update_id;
@@ -2399,10 +2712,9 @@ impl<SP: Deref> Channel<SP> where
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
                // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
                // either.
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
                // ChannelManager may generate duplicate claims/fails due to HTLC update events from
                // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
@@ -2450,12 +2762,13 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage: payment_preimage_arg.clone(),
                        }],
                };
 
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+               if self.context.channel_state.should_force_holding_cell() {
                        // Note that this condition is the same as the assertion in
                        // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
                        // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
@@ -2471,7 +2784,9 @@ impl<SP: Deref> Channel<SP> where
                                                        return UpdateFulfillFetch::DuplicateClaim {};
                                                }
                                        },
-                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+                                               &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+                                       {
                                                if htlc_id_arg == htlc_id {
                                                        log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
@@ -2483,7 +2798,7 @@ impl<SP: Deref> Channel<SP> where
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
+                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
@@ -2568,6 +2883,17 @@ impl<SP: Deref> Channel<SP> where
                        .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
        }
 
+       /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
+       /// want to fail blinded HTLCs where we are not the intro node.
+       ///
+       /// See [`Self::queue_fail_htlc`] for more info.
+       pub fn queue_fail_malformed_htlc<L: Deref>(
+               &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
+       ) -> Result<(), ChannelError> where L::Target: Logger {
+               self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
+                       .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+       }
+
        /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
        /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
        /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
@@ -2576,12 +2902,13 @@ impl<SP: Deref> Channel<SP> where
        /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
        /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
        /// [`ChannelError::Ignore`].
-       fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
-       -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+       fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
+               &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
+               logger: &L
+       ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        panic!("Was asked to fail an HTLC when channel was not in an operational state");
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
                // ChannelManager may generate duplicate claims/fails due to HTLC update events from
                // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
@@ -2615,7 +2942,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(None);
                }
 
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+               if self.context.channel_state.should_force_holding_cell() {
                        debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
                        force_holding_cell = true;
                }
@@ -2631,7 +2958,9 @@ impl<SP: Deref> Channel<SP> where
                                                        return Ok(None);
                                                }
                                        },
-                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+                                               &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+                                       {
                                                if htlc_id_arg == htlc_id {
                                                        debug_assert!(false, "Tried to fail an HTLC that was already failed");
                                                        return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
@@ -2641,128 +2970,42 @@ impl<SP: Deref> Channel<SP> where
                                }
                        }
                        log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
-                       self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
-                               htlc_id: htlc_id_arg,
-                               err_packet,
-                       });
+                       self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
                        return Ok(None);
                }
 
-               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
+               log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
+                       E::Message::name(), &self.context.channel_id());
                {
                        let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
-                       htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+                       htlc.state = err_packet.clone().to_inbound_htlc_state();
                }
 
-               Ok(Some(msgs::UpdateFailHTLC {
-                       channel_id: self.context.channel_id(),
-                       htlc_id: htlc_id_arg,
-                       reason: err_packet
-               }))
+               Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
        }
 
        // Message handlers:
-
-       /// Handles a funding_signed message from the remote end.
-       /// If this call is successful, broadcast the funding transaction (and not before!)
-       pub fn funding_signed<L: Deref>(
-               &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
-       where
-               L::Target: Logger
-       {
-               if !self.context.is_outbound() {
-                       return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
-               }
-               if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
-                       return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
-               }
-               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
-                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
-                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
-               }
-
-               let funding_script = self.context.get_funding_redeemscript();
-
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
-               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
-
-               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
-
-               let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
-               {
-                       let trusted_tx = initial_commitment_tx.trust();
-                       let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
-                       let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
-                       // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
-                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
-                               return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
-                       }
-               }
-
-               let holder_commitment_tx = HolderCommitmentTransaction::new(
-                       initial_commitment_tx,
-                       msg.signature,
-                       Vec::new(),
-                       &self.context.get_holder_pubkeys().funding_pubkey,
-                       self.context.counterparty_funding_pubkey()
-               );
-
-               self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
-                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
-
-               let funding_redeemscript = self.context.get_funding_redeemscript();
-               let funding_txo = self.context.get_funding_txo().unwrap();
-               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
-               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
-               let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
-               let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
-               monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
-               let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
-                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
-                                                         &self.context.destination_script, (funding_txo, funding_txo_script),
-                                                         &self.context.channel_transaction_parameters,
-                                                         funding_redeemscript.clone(), self.context.channel_value_satoshis,
-                                                         obscure_factor,
-                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
-               channel_monitor.provide_initial_counterparty_commitment_tx(
-                       counterparty_initial_bitcoin_tx.txid, Vec::new(),
-                       self.context.cur_counterparty_commitment_transaction_number,
-                       self.context.counterparty_cur_commitment_point.unwrap(),
-                       counterparty_initial_commitment_tx.feerate_per_kw(),
-                       counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
-                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
-
-               assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
-               if self.context.is_batch_funding() {
-                       self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
-               } else {
-                       self.context.channel_state = ChannelState::FundingSent as u32;
-               }
-               self.context.cur_holder_commitment_transaction_number -= 1;
-               self.context.cur_counterparty_commitment_transaction_number -= 1;
-
-               log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
-
-               let need_channel_ready = self.check_get_channel_ready(0).is_some();
-               self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
-               Ok(channel_monitor)
-       }
-
        /// Updates the state of the channel to indicate that all channels in the batch have received
        /// funding_signed and persisted their monitors.
        /// The funding transaction is consequently allowed to be broadcast, and the channel can be
        /// treated as a non-batch channel going forward.
        pub fn set_batch_ready(&mut self) {
                self.context.is_batch_funding = None;
-               self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
+               self.context.channel_state.clear_waiting_for_batch();
+       }
+
+       /// Unsets the existing funding information.
+       ///
+       /// This must only be used if the channel has not yet completed funding and has not been used.
+       ///
+       /// Further, the channel must be immediately shut down after this with a call to
+       /// [`ChannelContext::force_shutdown`].
+       pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
+               debug_assert!(matches!(
+                       self.context.channel_state, ChannelState::AwaitingChannelReady(_)
+               ));
+               self.context.channel_transaction_parameters.funding_outpoint = None;
+               self.context.channel_id = temporary_channel_id;
        }
 
        /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
@@ -2776,7 +3019,7 @@ impl<SP: Deref> Channel<SP> where
                NS::Target: NodeSigner,
                L::Target: Logger
        {
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        self.context.workaround_lnd_bug_4006 = Some(msg.clone());
                        return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
                }
@@ -2790,24 +3033,31 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
-               let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-
                // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
                // batch, but we can receive channel_ready messages.
-               debug_assert!(
-                       non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
-                       non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
-               );
-               if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
-                       self.context.channel_state |= ChannelState::TheirChannelReady as u32;
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
-                       self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
-                       self.context.update_time_counter += 1;
-               } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
-                       // If we reconnected before sending our `channel_ready` they may still resend theirs:
-                       (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
-                                             (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
-               {
+               let mut check_reconnection = false;
+               match &self.context.channel_state {
+                       ChannelState::AwaitingChannelReady(flags) => {
+                               let flags = *flags & !FundedStateFlags::ALL;
+                               debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+                               if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
+                                       // If we reconnected before sending our `channel_ready` they may still resend theirs.
+                                       check_reconnection = true;
+                               } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+                                       self.context.channel_state.set_their_channel_ready();
+                               } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
+                                       self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
+                                       self.context.update_time_counter += 1;
+                               } else {
+                                       // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
+                                       debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
+                               }
+                       }
+                       // If we reconnected before sending our `channel_ready` they may still resend theirs.
+                       ChannelState::ChannelReady(_) => check_reconnection = true,
+                       _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+               }
+               if check_reconnection {
                        // They probably disconnected/reconnected and re-sent the channel_ready, which is
                        // required, or they're sending a fresh SCID alias.
                        let expected_point =
@@ -2831,8 +3081,6 @@ impl<SP: Deref> Channel<SP> where
                                return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
                        }
                        return Ok(None);
-               } else {
-                       return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
                }
 
                self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
@@ -2850,17 +3098,18 @@ impl<SP: Deref> Channel<SP> where
        where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
                FE::Target: FeeEstimator, L::Target: Logger,
        {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
+                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+               }
                // We can't accept HTLCs sent after we've sent a shutdown.
-               let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
-               if local_sent_shutdown {
+               if self.context.channel_state.is_local_shutdown_sent() {
                        pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
                }
                // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
-               let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
-               if remote_sent_shutdown {
+               if self.context.channel_state.is_remote_shutdown_sent() {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
                }
                if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
@@ -2995,7 +3244,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
                }
 
-               if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+               if self.context.channel_state.is_local_shutdown_sent() {
                        if let PendingHTLCStatus::Forward(_) = pending_forward_status {
                                panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
                        }
@@ -3045,10 +3294,10 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
 
@@ -3056,10 +3305,10 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
                }
 
@@ -3068,10 +3317,10 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
                }
 
@@ -3082,13 +3331,13 @@ impl<SP: Deref> Channel<SP> where
        pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
                where L::Target: Logger
        {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+               if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
                        return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
                }
 
@@ -3201,7 +3450,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.counterparty_funding_pubkey()
                );
 
-               self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
+               self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
                        .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
 
                // Update state now that we've passed all the can-fail calls...
@@ -3249,6 +3498,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let mut monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
                                commitment_tx: holder_commitment_tx,
                                htlc_outputs: htlcs_and_sigs,
@@ -3263,11 +3513,11 @@ impl<SP: Deref> Channel<SP> where
                // build_commitment_no_status_check() next which will reset this to RAAFirst.
                self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
 
-               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+               if self.context.channel_state.is_monitor_update_in_progress() {
                        // In case we initially failed monitor updating without requiring a response, we need
                        // to make sure the RAA gets sent first.
                        self.context.monitor_pending_revoke_and_ack = true;
-                       if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+                       if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
                                // If we were going to send a commitment_signed after the RAA, go ahead and do all
                                // the corresponding HTLC status updates so that
                                // get_last_commitment_update_for_send includes the right HTLCs.
@@ -3283,7 +3533,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(self.push_ret_blockable_mon_update(monitor_update));
                }
 
-               let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+               let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
                        // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
                        // we'll send one right away when we get the revoke_and_ack when we
                        // free_holding_cell_htlcs().
@@ -3309,8 +3559,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
-                  (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
@@ -3322,13 +3571,14 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+               assert!(!self.context.channel_state.is_monitor_update_in_progress());
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
                                if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
 
                        let mut monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+                               counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: Vec::new(),
                        };
 
@@ -3347,11 +3597,12 @@ impl<SP: Deref> Channel<SP> where
                                match &htlc_update {
                                        &HTLCUpdateAwaitingACK::AddHTLC {
                                                amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
-                                               skimmed_fee_msat, ..
+                                               skimmed_fee_msat, blinding_point, ..
                                        } => {
-                                               match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
-                                                       onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
-                                               {
+                                               match self.send_htlc(
+                                                       amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
+                                                       false, skimmed_fee_msat, blinding_point, fee_estimator, logger
+                                               ) {
                                                        Ok(_) => update_add_count += 1,
                                                        Err(e) => {
                                                                match e {
@@ -3404,6 +3655,20 @@ impl<SP: Deref> Channel<SP> where
                                                        }
                                                }
                                        },
+                                       &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+                                               match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
+                                                       Ok(update_fail_malformed_opt) => {
+                                                               debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
+                                                               update_fail_count += 1;
+                                                       },
+                                                       Err(e) => {
+                                                               if let ChannelError::Ignore(_) = e {}
+                                                               else {
+                                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+                                                               }
+                                                       }
+                                               }
+                                       },
                                }
                        }
                        if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
@@ -3442,13 +3707,13 @@ impl<SP: Deref> Channel<SP> where
        ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
        where F::Target: FeeEstimator, L::Target: Logger,
        {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+               if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
                        return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
                }
 
@@ -3460,7 +3725,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
-               if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+               if !self.context.channel_state.is_awaiting_remote_revoke() {
                        // Our counterparty seems to have burned their coins to us (by revoking a state when we
                        // haven't given them a new commitment transaction to broadcast). We should probably
                        // take advantage of this by updating our channel monitor, sending them an error, and
@@ -3483,7 +3748,10 @@ impl<SP: Deref> Channel<SP> where
                                        self.context.cur_counterparty_commitment_transaction_number + 1,
                                        &secret
                                ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
-                       }
+                       },
+                       // TODO (taproot|arik)
+                       #[cfg(taproot)]
+                       _ => todo!()
                };
 
                self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
@@ -3491,6 +3759,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let mut monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
                                idx: self.context.cur_counterparty_commitment_transaction_number + 1,
                                secret: msg.per_commitment_secret,
@@ -3501,7 +3770,7 @@ impl<SP: Deref> Channel<SP> where
                // (note that we may still fail to generate the new commitment_signed message, but that's
                // OK, we step the channel here and *then* if the new generation fails we can fail the
                // channel based on that, but stepping stuff here should be safe either way.
-               self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+               self.context.channel_state.clear_awaiting_remote_revoke();
                self.context.sent_message_awaiting_response = None;
                self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
                self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
@@ -3643,7 +3912,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
-               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+               if self.context.channel_state.is_monitor_update_in_progress() {
                        // We can't actually generate a new commitment transaction (incl by freeing holding
                        // cells) while we can't update the monitor, so we just return what we have.
                        if require_commitment {
@@ -3765,7 +4034,7 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+               if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
                        force_holding_cell = true;
                }
 
@@ -3790,12 +4059,12 @@ impl<SP: Deref> Channel<SP> where
        /// completed.
        /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
        pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
-                       return Err(());
+               assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+               if self.context.channel_state.is_pre_funded_state() {
+                       return Err(())
                }
 
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+               if self.context.channel_state.is_peer_disconnected() {
                        // While the below code should be idempotent, it's simpler to just return early, as
                        // redundant disconnect events can fire, though they should be rare.
                        return Ok(());
@@ -3857,7 +4126,7 @@ impl<SP: Deref> Channel<SP> where
 
                self.context.sent_message_awaiting_response = None;
 
-               self.context.channel_state |= ChannelState::PeerDisconnected as u32;
+               self.context.channel_state.set_peer_disconnected();
                log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
                Ok(())
        }
@@ -3884,7 +4153,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.monitor_pending_forwards.append(&mut pending_forwards);
                self.context.monitor_pending_failures.append(&mut pending_fails);
                self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
-               self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+               self.context.channel_state.set_monitor_update_in_progress();
        }
 
        /// Indicates that the latest ChannelMonitor update has been committed by the client
@@ -3898,19 +4167,22 @@ impl<SP: Deref> Channel<SP> where
                L::Target: Logger,
                NS::Target: NodeSigner
        {
-               assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
-               self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+               assert!(self.context.channel_state.is_monitor_update_in_progress());
+               self.context.channel_state.clear_monitor_update_in_progress();
 
-               // If we're past (or at) the FundingSent stage on an outbound channel, try to
+               // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
                // first received the funding_signed.
                let mut funding_broadcastable =
-                       if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
+                       if self.context.is_outbound() &&
+                               matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+                               matches!(self.context.channel_state, ChannelState::ChannelReady(_))
+                       {
                                self.context.funding_transaction.take()
                        } else { None };
                // That said, if the funding transaction is already confirmed (ie we're active with a
                // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
-               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
                        funding_broadcastable = None;
                }
 
@@ -3941,7 +4213,7 @@ impl<SP: Deref> Channel<SP> where
                let mut finalized_claimed_htlcs = Vec::new();
                mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
 
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+               if self.context.channel_state.is_peer_disconnected() {
                        self.context.monitor_pending_revoke_and_ack = false;
                        self.context.monitor_pending_commitment_signed = false;
                        return MonitorRestoreUpdates {
@@ -3978,7 +4250,7 @@ impl<SP: Deref> Channel<SP> where
                if self.context.is_outbound() {
                        return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
                }
                Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
@@ -4006,7 +4278,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Indicates that the signer may have some signatures for us, so we should retry if we're
        /// blocked.
-       #[allow(unused)]
+       #[cfg(async_signing)]
        pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
                let commitment_update = if self.context.signer_pending_commitment_update {
                        self.get_last_commitment_update_for_send(logger).ok()
@@ -4017,20 +4289,15 @@ impl<SP: Deref> Channel<SP> where
                let channel_ready = if funding_signed.is_some() {
                        self.check_get_channel_ready(0)
                } else { None };
-               let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
-                       self.context.get_funding_created_msg(logger)
-               } else { None };
 
-               log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
+               log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
                        if commitment_update.is_some() { "a" } else { "no" },
                        if funding_signed.is_some() { "a" } else { "no" },
-                       if funding_created.is_some() { "a" } else { "no" },
                        if channel_ready.is_some() { "a" } else { "no" });
 
                SignerResumeUpdates {
                        commitment_update,
                        funding_signed,
-                       funding_created,
                        channel_ready,
                }
        }
@@ -4064,6 +4331,7 @@ impl<SP: Deref> Channel<SP> where
                                        cltv_expiry: htlc.cltv_expiry,
                                        onion_routing_packet: (**onion_packet).clone(),
                                        skimmed_fee_msat: htlc.skimmed_fee_msat,
+                                       blinding_point: htlc.blinding_point,
                                });
                        }
                }
@@ -4114,11 +4382,16 @@ impl<SP: Deref> Channel<SP> where
                        }
                        update
                } else {
-                       if !self.context.signer_pending_commitment_update {
-                               log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
-                               self.context.signer_pending_commitment_update = true;
+                       #[cfg(not(async_signing))] {
+                               panic!("Failed to get signature for new commitment state");
+                       }
+                       #[cfg(async_signing)] {
+                               if !self.context.signer_pending_commitment_update {
+                                       log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+                                       self.context.signer_pending_commitment_update = true;
+                               }
+                               return Err(());
                        }
-                       return Err(());
                };
                Ok(msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
@@ -4128,7 +4401,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
        pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
-               if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+               if self.context.channel_state.is_local_shutdown_sent() {
                        assert!(self.context.shutdown_scriptpubkey.is_some());
                        Some(msgs::Shutdown {
                                channel_id: self.context.channel_id,
@@ -4152,7 +4425,7 @@ impl<SP: Deref> Channel<SP> where
                L::Target: Logger,
                NS::Target: NodeSigner
        {
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+               if !self.context.channel_state.is_peer_disconnected() {
                        // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
                        // almost certainly indicates we are going to end up out-of-sync in some way, so we
                        // just close here instead of trying to recover.
@@ -4164,6 +4437,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
                }
 
+               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
                if msg.next_remote_commitment_number > 0 {
                        let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
                        let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
@@ -4171,7 +4445,7 @@ impl<SP: Deref> Channel<SP> where
                        if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
                                return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
                        }
-                       if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+                       if msg.next_remote_commitment_number > our_commitment_transaction {
                                macro_rules! log_and_panic {
                                        ($err_msg: expr) => {
                                                log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
@@ -4191,26 +4465,27 @@ impl<SP: Deref> Channel<SP> where
 
                // Before we change the state of the channel, we check if the peer is sending a very old
                // commitment transaction number, if yes we send a warning message.
-               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
-               if  msg.next_remote_commitment_number + 1 < our_commitment_transaction {
-                       return Err(
-                               ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
-                       );
+               if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+                       return Err(ChannelError::Warn(format!(
+                               "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_remote_commitment_number,
+                               our_commitment_transaction
+                       )));
                }
 
                // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
                // remaining cases either succeed or ErrorMessage-fail).
-               self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
+               self.context.channel_state.clear_peer_disconnected();
                self.context.sent_message_awaiting_response = None;
 
                let shutdown_msg = self.get_outbound_shutdown();
 
                let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
 
-               if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
+               if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
                        // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
-                       if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
-                                       self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+                       if !self.context.channel_state.is_our_channel_ready() ||
+                                       self.context.channel_state.is_monitor_update_in_progress() {
                                if msg.next_remote_commitment_number != 0 {
                                        return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
                                }
@@ -4237,26 +4512,31 @@ impl<SP: Deref> Channel<SP> where
                        });
                }
 
-               let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+               let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
                        // Remote isn't waiting on any RevokeAndACK from us!
                        // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
                        None
-               } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
-                       if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+               } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
+                       if self.context.channel_state.is_monitor_update_in_progress() {
                                self.context.monitor_pending_revoke_and_ack = true;
                                None
                        } else {
                                Some(self.get_last_revoke_and_ack())
                        }
                } else {
-                       return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
+                       debug_assert!(false, "All values should have been handled in the four cases above");
+                       return Err(ChannelError::Close(format!(
+                               "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_remote_commitment_number,
+                               our_commitment_transaction
+                       )));
                };
 
                // We increment cur_counterparty_commitment_transaction_number only upon receipt of
                // revoke_and_ack, not on sending commitment_signed, so we add one if have
                // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
                // the corresponding revoke_and_ack back yet.
-               let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
+               let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
                if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
                        self.mark_awaiting_response();
                }
@@ -4292,7 +4572,7 @@ impl<SP: Deref> Channel<SP> where
                                log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
                        }
 
-                       if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+                       if self.context.channel_state.is_monitor_update_in_progress() {
                                self.context.monitor_pending_commitment_signed = true;
                                Ok(ReestablishResponses {
                                        channel_ready, shutdown_msg, announcement_sigs,
@@ -4307,8 +4587,18 @@ impl<SP: Deref> Channel<SP> where
                                        order: self.context.resend_order.clone(),
                                })
                        }
+               } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
+                       Err(ChannelError::Close(format!(
+                               "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_local_commitment_number,
+                               next_counterparty_commitment_number,
+                       )))
                } else {
-                       Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
+                       Err(ChannelError::Close(format!(
+                               "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
+                               msg.next_local_commitment_number,
+                               next_counterparty_commitment_number,
+                       )))
                }
        }
 
@@ -4436,7 +4726,10 @@ impl<SP: Deref> Channel<SP> where
                                                max_fee_satoshis: our_max_fee,
                                        }),
                                }), None, None))
-                       }
+                       },
+                       // TODO (taproot|arik)
+                       #[cfg(taproot)]
+                       _ => todo!()
                }
        }
 
@@ -4466,10 +4759,10 @@ impl<SP: Deref> Channel<SP> where
                &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
        ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        {
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+               if self.context.channel_state.is_pre_funded_state() {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
@@ -4480,7 +4773,7 @@ impl<SP: Deref> Channel<SP> where
                                return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
                        }
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+               assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
 
                if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
                        return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
@@ -4497,7 +4790,7 @@ impl<SP: Deref> Channel<SP> where
                // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
                // immediately after the commitment dance, but we can send a Shutdown because we won't send
                // any further commitment updates after we set LocalShutdownSent.
-               let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
+               let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
 
                let update_shutdown_script = match self.context.shutdown_scriptpubkey {
                        Some(_) => false,
@@ -4517,13 +4810,14 @@ impl<SP: Deref> Channel<SP> where
 
                // From here on out, we may not fail!
 
-               self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+               self.context.channel_state.set_remote_shutdown_sent();
                self.context.update_time_counter += 1;
 
                let monitor_update = if update_shutdown_script {
                        self.context.latest_monitor_update_id += 1;
                        let monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id,
+                               counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
@@ -4553,7 +4847,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                });
 
-               self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+               self.context.channel_state.set_local_shutdown_sent();
                self.context.update_time_counter += 1;
 
                Ok((shutdown, monitor_update, dropped_outbound_htlcs))
@@ -4587,10 +4881,10 @@ impl<SP: Deref> Channel<SP> where
                -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
                where F::Target: FeeEstimator
        {
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
+               if !self.context.channel_state.is_both_sides_shutdown() {
                        return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+               if self.context.channel_state.is_peer_disconnected() {
                        return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
                }
                if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
@@ -4604,7 +4898,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
                }
 
-               if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
+               if self.context.channel_state.is_monitor_update_in_progress() {
                        self.context.pending_counterparty_closing_signed = Some(msg.clone());
                        return Ok((None, None, None));
                }
@@ -4640,9 +4934,11 @@ impl<SP: Deref> Channel<SP> where
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                                       channel_id: self.context.channel_id,
+                                       counterparty_node_id: self.context.counterparty_node_id,
                                };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
-                               self.context.channel_state = ChannelState::ShutdownComplete as u32;
+                               self.context.channel_state = ChannelState::ShutdownComplete;
                                self.context.update_time_counter += 1;
                                return Ok((None, Some(tx), Some(shutdown_result)));
                        }
@@ -4668,8 +4964,10 @@ impl<SP: Deref> Channel<SP> where
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+                                                               channel_id: self.context.channel_id,
+                                                               counterparty_node_id: self.context.counterparty_node_id,
                                                        };
-                                                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
+                                                       self.context.channel_state = ChannelState::ShutdownComplete;
                                                        self.context.update_time_counter += 1;
                                                        let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
                                                        (Some(tx), Some(shutdown_result))
@@ -4687,7 +4985,10 @@ impl<SP: Deref> Channel<SP> where
                                                                max_fee_satoshis: our_max_fee,
                                                        }),
                                                }), signed_tx, shutdown_result))
-                                       }
+                                       },
+                                       // TODO (taproot|arik)
+                                       #[cfg(taproot)]
+                                       _ => todo!()
                                }
                        }
                }
@@ -4791,7 +5092,7 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
-               self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
+               self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
        }
 
        pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
@@ -4799,7 +5100,7 @@ impl<SP: Deref> Channel<SP> where
        }
 
        #[cfg(test)]
-       pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
+       pub fn get_signer(&self) -> &ChannelSignerType<SP> {
                &self.context.holder_signer
        }
 
@@ -4831,7 +5132,7 @@ impl<SP: Deref> Channel<SP> where
        /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
        /// Allowed in any state (including after shutdown)
        pub fn is_awaiting_monitor_update(&self) -> bool {
-               (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
+               self.context.channel_state.is_monitor_update_in_progress()
        }
 
        /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
@@ -4873,17 +5174,18 @@ impl<SP: Deref> Channel<SP> where
        /// advanced state.
        pub fn is_awaiting_initial_mon_persist(&self) -> bool {
                if !self.is_awaiting_monitor_update() { return false; }
-               if self.context.channel_state &
-                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
-                               == ChannelState::FundingSent as u32 {
+               if matches!(
+                       self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
+                       if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
+               ) {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
-                       // FundingSent set, though our peer could have sent their channel_ready.
+                       // AwaitingChannelReady set, though our peer could have sent their channel_ready.
                        debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
                        return true;
                }
                if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
                        self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
-                       // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
+                       // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
                        // waiting for the initial monitor persistence. Thus, we check if our commitment
                        // transaction numbers have both been iterated only exactly once (for the
                        // funding_signed), and we're awaiting monitor update.
@@ -4905,27 +5207,25 @@ impl<SP: Deref> Channel<SP> where
 
        /// Returns true if our channel_ready has been sent
        pub fn is_our_channel_ready(&self) -> bool {
-               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
+               matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
+                       matches!(self.context.channel_state, ChannelState::ChannelReady(_))
        }
 
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
        pub fn received_shutdown(&self) -> bool {
-               (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
+               self.context.channel_state.is_remote_shutdown_sent()
        }
 
        /// Returns true if we either initiated or agreed to shut down the channel.
        pub fn sent_shutdown(&self) -> bool {
-               (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+               self.context.channel_state.is_local_shutdown_sent()
        }
 
        /// Returns true if this channel is fully shut down. True here implies that no further actions
        /// may/will be taken on this channel, and thus this object should be freed. Any future changes
        /// will be handled appropriately by the chain monitor.
        pub fn is_shutdown(&self) -> bool {
-               if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32  {
-                       assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
-                       true
-               } else { false }
+               matches!(self.context.channel_state, ChannelState::ShutdownComplete)
        }
 
        pub fn channel_update_status(&self) -> ChannelUpdateStatus {
@@ -4962,35 +5262,36 @@ impl<SP: Deref> Channel<SP> where
 
                // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
                // channel_ready until the entire batch is ready.
-               let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-               let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
-                       self.context.channel_state |= ChannelState::OurChannelReady as u32;
+               let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
+                       self.context.channel_state.set_our_channel_ready();
                        true
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
-                       self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
+                       self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
                        self.context.update_time_counter += 1;
                        true
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
-                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
+                       if self.context.funding_tx_confirmation_height != 0 &&
+                               self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
+                       {
                                // We should never see a funding transaction on-chain until we've received
                                // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
                                // an inbound channel - before that we have no known funding TXID). The fuzzer,
                                // however, may do this and we shouldn't treat it as a bug.
                                #[cfg(not(fuzzing))]
-                               panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
+                               panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
                                        Do NOT broadcast a funding transaction manually - let LDK do it for you!",
-                                       self.context.channel_state);
+                                       self.context.channel_state.to_u32());
                        }
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                };
 
                if need_commitment_update {
-                       if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
-                               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+                       if !self.context.channel_state.is_monitor_update_in_progress() {
+                               if !self.context.channel_state.is_peer_disconnected() {
                                        let next_per_commitment_point =
                                                self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
                                        return Some(msgs::ChannelReady {
@@ -5144,9 +5445,8 @@ impl<SP: Deref> Channel<SP> where
                        return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                }
 
-               let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-               if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
-                  (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+                       self.context.channel_state.is_our_channel_ready() {
                        let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
                        if self.context.funding_tx_confirmation_height == 0 {
                                // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
@@ -5173,8 +5473,8 @@ impl<SP: Deref> Channel<SP> where
                                height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
                        log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
                        // If funding_tx_confirmed_in is unset, the channel must not be active
-                       assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
-                       assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
+                       assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
+                       assert!(!self.context.channel_state.is_our_channel_ready());
                        return Err(ClosureReason::FundingTimedOut);
                }
 
@@ -5196,7 +5496,7 @@ impl<SP: Deref> Channel<SP> where
                        // larger. If we don't know that time has moved forward, we can just set it to the last
                        // time we saw and it will be ignored.
                        let best_time = self.context.update_time_counter;
-                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
+                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
                                Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
                                        assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
@@ -5272,7 +5572,7 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
-               if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+               if self.context.channel_state.is_peer_disconnected() {
                        log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
                        return None;
                }
@@ -5318,7 +5618,10 @@ impl<SP: Deref> Channel<SP> where
                                        node_signature: our_node_sig,
                                        bitcoin_signature: our_bitcoin_sig,
                                })
-                       }
+                       },
+                       // TODO (taproot|arik)
+                       #[cfg(taproot)]
+                       _ => todo!()
                }
        }
 
@@ -5345,7 +5648,10 @@ impl<SP: Deref> Channel<SP> where
                                                bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
                                                contents: announcement,
                                        })
-                               }
+                               },
+                               // TODO (taproot|arik)
+                               #[cfg(taproot)]
+                               _ => todo!()
                        }
                } else {
                        Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
@@ -5404,7 +5710,7 @@ impl<SP: Deref> Channel<SP> where
        /// May panic if called on a channel that wasn't immediately-previously
        /// self.remove_uncommitted_htlcs_and_mark_paused()'d
        pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
-               assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
+               assert!(self.context.channel_state.is_peer_disconnected());
                assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
                // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
                // current to_remote balances. However, it no longer has any use, and thus is now simply
@@ -5440,7 +5746,7 @@ impl<SP: Deref> Channel<SP> where
                        // (which is one further, as they always revoke previous commitment transaction, not
                        // the one we send) so we have to decrement by 1. Note that if
                        // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
-                       // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
+                       // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
                        // overflow here.
                        next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
                        your_last_per_commitment_secret: remote_last_secret,
@@ -5463,13 +5769,13 @@ impl<SP: Deref> Channel<SP> where
        pub fn queue_add_htlc<F: Deref, L: Deref>(
                &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
-               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+               blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) -> Result<(), ChannelError>
        where F::Target: FeeEstimator, L::Target: Logger
        {
                self
                        .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
-                               skimmed_fee_msat, fee_estimator, logger)
+                               skimmed_fee_msat, blinding_point, fee_estimator, logger)
                        .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
                        .map_err(|err| {
                                if let ChannelError::Ignore(_) = err { /* fine */ }
@@ -5497,11 +5803,15 @@ impl<SP: Deref> Channel<SP> where
        fn send_htlc<F: Deref, L: Deref>(
                &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
-               skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+               skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
        ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
+               if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
+                       self.context.channel_state.is_local_shutdown_sent() ||
+                       self.context.channel_state.is_remote_shutdown_sent()
+               {
                        return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
                }
                let channel_total_msat = self.context.channel_value_satoshis * 1000;
@@ -5524,7 +5834,7 @@ impl<SP: Deref> Channel<SP> where
                                available_balances.next_outbound_htlc_limit_msat)));
                }
 
-               if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
+               if self.context.channel_state.is_peer_disconnected() {
                        // Note that this should never really happen, if we're !is_live() on receipt of an
                        // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
                        // the user to send directly into a !is_live() channel. However, if we
@@ -5534,7 +5844,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
 
-               let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+               let need_holding_cell = self.context.channel_state.should_force_holding_cell();
                log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
                        payment_hash, amount_msat,
                        if force_holding_cell { "into holding cell" }
@@ -5554,6 +5864,7 @@ impl<SP: Deref> Channel<SP> where
                                source,
                                onion_routing_packet,
                                skimmed_fee_msat,
+                               blinding_point,
                        });
                        return Ok(None);
                }
@@ -5565,6 +5876,7 @@ impl<SP: Deref> Channel<SP> where
                        cltv_expiry,
                        state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
                        source,
+                       blinding_point,
                        skimmed_fee_msat,
                });
 
@@ -5576,6 +5888,7 @@ impl<SP: Deref> Channel<SP> where
                        cltv_expiry,
                        onion_routing_packet,
                        skimmed_fee_msat,
+                       blinding_point,
                };
                self.context.next_holder_htlc_id += 1;
 
@@ -5628,6 +5941,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.latest_monitor_update_id += 1;
                let monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
+                       counterparty_node_id: Some(self.context.counterparty_node_id),
                        updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
                                commitment_txid: counterparty_commitment_txid,
                                htlc_outputs: htlcs.clone(),
@@ -5638,7 +5952,7 @@ impl<SP: Deref> Channel<SP> where
                                to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
                        }]
                };
-               self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+               self.context.channel_state.set_awaiting_remote_revoke();
                monitor_update
        }
 
@@ -5692,8 +6006,12 @@ impl<SP: Deref> Channel<SP> where
                                                htlcs.push(htlc);
                                        }
 
-                                       let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
-                                               .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
+                                       let res = ecdsa.sign_counterparty_commitment(
+                                                       &commitment_stats.tx,
+                                                       commitment_stats.inbound_htlc_preimages,
+                                                       commitment_stats.outbound_htlc_preimages,
+                                                       &self.context.secp_ctx,
+                                               ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
                                        signature = res.0;
                                        htlc_signatures = res.1;
 
@@ -5718,7 +6036,10 @@ impl<SP: Deref> Channel<SP> where
                                        #[cfg(taproot)]
                                        partial_signature_with_nonce: None,
                                }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
-                       }
+                       },
+                       // TODO (taproot|arik)
+                       #[cfg(taproot)]
+                       _ => todo!()
                }
        }
 
@@ -5735,7 +6056,7 @@ impl<SP: Deref> Channel<SP> where
        where F::Target: FeeEstimator, L::Target: Logger
        {
                let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
-                       onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
+                       onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
                if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
                match send_res? {
                        Some(_) => {
@@ -5765,44 +6086,32 @@ impl<SP: Deref> Channel<SP> where
 
        /// Begins the shutdown process, getting a message for the remote peer and returning all
        /// holding cell HTLCs for payment failure.
-       ///
-       /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
-       /// [`ChannelMonitorUpdate`] will be returned).
        pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
                target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
+       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        {
                for htlc in self.context.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
                                return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
                        }
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
-                       if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
-                               return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
-                       }
-                       else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
-                               return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
-                       }
+               if self.context.channel_state.is_local_shutdown_sent() {
+                       return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
+               }
+               else if self.context.channel_state.is_remote_shutdown_sent() {
+                       return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
                }
                if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
                        return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
+               assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
+               if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
                        return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
                }
 
-               // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
-               // script is set, we just force-close and call it a day.
-               let mut chan_closed = false;
-               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
-                       chan_closed = true;
-               }
-
                let update_shutdown_script = match self.context.shutdown_scriptpubkey {
                        Some(_) => false,
-                       None if !chan_closed => {
+                       None => {
                                // use override shutdown script if provided
                                let shutdown_scriptpubkey = match override_shutdown_script {
                                        Some(script) => script,
@@ -5820,29 +6129,18 @@ impl<SP: Deref> Channel<SP> where
                                self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
                                true
                        },
-                       None => false,
                };
 
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
-               let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
-                       let shutdown_result = ShutdownResult {
-                               monitor_update: None,
-                               dropped_outbound_htlcs: Vec::new(),
-                               unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
-                       };
-                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
-                       Some(shutdown_result)
-               } else {
-                       self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
-                       None
-               };
+               self.context.channel_state.set_local_shutdown_sent();
                self.context.update_time_counter += 1;
 
                let monitor_update = if update_shutdown_script {
                        self.context.latest_monitor_update_id += 1;
                        let monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id,
+                               counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
@@ -5872,7 +6170,7 @@ impl<SP: Deref> Channel<SP> where
                debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
                        "we can't both complete shutdown and return a monitor update");
 
-               Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
+               Ok((shutdown, monitor_update, dropped_outbound_htlcs))
        }
 
        pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
@@ -5960,7 +6258,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        }
                }
 
-               let destination_script = match signer_provider.get_destination_script() {
+               let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
                        Ok(script) => script,
                        Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
                };
@@ -5983,7 +6281,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                                channel_id: temporary_channel_id,
                                temporary_channel_id: Some(temporary_channel_id),
-                               channel_state: ChannelState::OurInitSent as u32,
+                               channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
                                announcement_sigs_state: AnnouncementSigsState::NotSent,
                                secp_ctx,
                                channel_value_satoshis,
@@ -6100,6 +6398,38 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                })
        }
 
+       /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
+       fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+               let signature = match &self.context.holder_signer {
+                       // TODO (taproot|arik): move match into calling method for Taproot
+                       ChannelSignerType::Ecdsa(ecdsa) => {
+                               ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
+                                       .map(|(sig, _)| sig).ok()?
+                       },
+                       // TODO (taproot|arik)
+                       #[cfg(taproot)]
+                       _ => todo!()
+               };
+
+               if self.context.signer_pending_funding {
+                       log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
+                       self.context.signer_pending_funding = false;
+               }
+
+               Some(msgs::FundingCreated {
+                       temporary_channel_id: self.context.temporary_channel_id.unwrap(),
+                       funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
+                       funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
+                       signature,
+                       #[cfg(taproot)]
+                       partial_signature_with_nonce: None,
+                       #[cfg(taproot)]
+                       next_local_nonce: None,
+               })
+       }
+
        /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
        /// a funding_created message for the remote peer.
        /// Panics if called at some time other than immediately after initial handshake, if called twice,
@@ -6107,12 +6437,15 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
-       -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
+       pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
+       -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
                }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+               if !matches!(
+                       self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                       if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+               ) {
                        panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
@@ -6126,7 +6459,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // Now that we're past error-generating stuff, update our local state:
 
-               self.context.channel_state = ChannelState::FundingCreated as u32;
+               self.context.channel_state = ChannelState::FundingNegotiated;
                self.context.channel_id = funding_txo.to_channel_id();
 
                // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
@@ -6140,19 +6473,20 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.funding_transaction = Some(funding_transaction);
                self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
 
-               let funding_created = self.context.get_funding_created_msg(logger);
+               let funding_created = self.get_funding_created_msg(logger);
                if funding_created.is_none() {
-                       if !self.context.signer_pending_funding {
-                               log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
-                               self.context.signer_pending_funding = true;
+                       #[cfg(not(async_signing))] {
+                               panic!("Failed to get signature for new funding creation");
+                       }
+                       #[cfg(async_signing)] {
+                               if !self.context.signer_pending_funding {
+                                       log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+                                       self.context.signer_pending_funding = true;
+                               }
                        }
                }
 
-               let channel = Channel {
-                       context: self.context,
-               };
-
-               Ok((channel, funding_created))
+               Ok(funding_created)
        }
 
        fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
@@ -6187,7 +6521,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        where
                F::Target: FeeEstimator
        {
-               if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+               if !self.context.is_outbound() ||
+                       !matches!(
+                               self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                               if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+                       )
+               {
+                       return Err(());
+               }
                if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
                        // We've exhausted our options
                        return Err(());
@@ -6218,7 +6559,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                if !self.context.is_outbound() {
                        panic!("Tried to open a channel for an inbound channel?");
                }
-               if self.context.channel_state != ChannelState::OurInitSent as u32 {
+               if self.context.have_received_message() {
                        panic!("Cannot generate an open_channel after we've moved forward");
                }
 
@@ -6264,7 +6605,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                if !self.context.is_outbound() {
                        return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
                }
-               if self.context.channel_state != ChannelState::OurInitSent as u32 {
+               if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
                        return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
                }
                if msg.dust_limit_satoshis > 21000000 * 100000000 {
@@ -6381,11 +6722,119 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
                self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
 
-               self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+               self.context.channel_state = ChannelState::NegotiatingFunding(
+                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+               );
                self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
 
                Ok(())
        }
+
+       /// Handles a funding_signed message from the remote end.
+       /// If this call is successful, broadcast the funding transaction (and not before!)
+       pub fn funding_signed<L: Deref>(
+               mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+       ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
+       where
+               L::Target: Logger
+       {
+               if !self.context.is_outbound() {
+                       return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
+               }
+               if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
+                       return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
+               }
+               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+               }
+
+               let funding_script = self.context.get_funding_redeemscript();
+
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+
+               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+               let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+               {
+                       let trusted_tx = initial_commitment_tx.trust();
+                       let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+                       let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+                       // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+                               return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
+                       }
+               }
+
+               let holder_commitment_tx = HolderCommitmentTransaction::new(
+                       initial_commitment_tx,
+                       msg.signature,
+                       Vec::new(),
+                       &self.context.get_holder_pubkeys().funding_pubkey,
+                       self.context.counterparty_funding_pubkey()
+               );
+
+               let validated =
+                       self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
+               if validated.is_err() {
+                       return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+               }
+
+               let funding_redeemscript = self.context.get_funding_redeemscript();
+               let funding_txo = self.context.get_funding_txo().unwrap();
+               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+               let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+               let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+               monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+               let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
+                                                         &self.context.destination_script, (funding_txo, funding_txo_script),
+                                                         &self.context.channel_transaction_parameters,
+                                                         funding_redeemscript.clone(), self.context.channel_value_satoshis,
+                                                         obscure_factor,
+                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
+               channel_monitor.provide_initial_counterparty_commitment_tx(
+                       counterparty_initial_bitcoin_tx.txid, Vec::new(),
+                       self.context.cur_counterparty_commitment_transaction_number,
+                       self.context.counterparty_cur_commitment_point.unwrap(),
+                       counterparty_initial_commitment_tx.feerate_per_kw(),
+                       counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+                       counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+
+               assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update!
+               if self.context.is_batch_funding() {
+                       self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
+               } else {
+                       self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
+               }
+               self.context.cur_holder_commitment_transaction_number -= 1;
+               self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+               log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
+
+               let mut channel = Channel { context: self.context };
+
+               let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+               channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+               Ok((channel, channel_monitor))
+       }
+
+       /// Indicates that the signer may have some signatures for us, so we should retry if we're
+       /// blocked.
+       #[cfg(async_signing)]
+       pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+               if self.context.signer_pending_funding && self.context.is_outbound() {
+                       log_trace!(logger, "Signer unblocked a funding_created");
+                       self.get_funding_created_msg(logger)
+               } else { None }
+       }
 }
 
 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
@@ -6394,6 +6843,41 @@ pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
        pub unfunded_context: UnfundedChannelContext,
 }
 
+/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
+/// [`msgs::OpenChannel`].
+pub(super) fn channel_type_from_open_channel(
+       msg: &msgs::OpenChannel, their_features: &InitFeatures,
+       our_supported_features: &ChannelTypeFeatures
+) -> Result<ChannelTypeFeatures, ChannelError> {
+       if let Some(channel_type) = &msg.channel_type {
+               if channel_type.supports_any_optional_bits() {
+                       return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+               }
+
+               // We only support the channel types defined by the `ChannelManager` in
+               // `provided_channel_type_features`. The channel type must always support
+               // `static_remote_key`.
+               if !channel_type.requires_static_remote_key() {
+                       return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+               }
+               // Make sure we support all of the features behind the channel type.
+               if !channel_type.is_subset(our_supported_features) {
+                       return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+               }
+               let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
+               if channel_type.requires_scid_privacy() && announced_channel {
+                       return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+               }
+               Ok(channel_type.clone())
+       } else {
+               let channel_type = ChannelTypeFeatures::from_init(&their_features);
+               if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+                       return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+               }
+               Ok(channel_type)
+       }
+}
+
 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Creates a new channel from a remote sides' request for one.
        /// Assumes chain_hash has already been checked and corresponds with what we expect!
@@ -6407,36 +6891,12 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                          F::Target: FeeEstimator,
                          L::Target: Logger,
        {
+               let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
                let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
 
                // First check the channel type is known, failing before we do anything else if we don't
                // support this channel type.
-               let channel_type = if let Some(channel_type) = &msg.channel_type {
-                       if channel_type.supports_any_optional_bits() {
-                               return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
-                       }
-
-                       // We only support the channel types defined by the `ChannelManager` in
-                       // `provided_channel_type_features`. The channel type must always support
-                       // `static_remote_key`.
-                       if !channel_type.requires_static_remote_key() {
-                               return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
-                       }
-                       // Make sure we support all of the features behind the channel type.
-                       if !channel_type.is_subset(our_supported_features) {
-                               return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
-                       }
-                       if channel_type.requires_scid_privacy() && announced_channel {
-                               return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
-                       }
-                       channel_type.clone()
-               } else {
-                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
-                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
-                       }
-                       channel_type
-               };
+               let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
 
                let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
                let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
@@ -6473,7 +6933,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                if msg.htlc_minimum_msat >= full_channel_value_msat {
                        return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
                }
-               Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
+               Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
 
                let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
                if msg.to_self_delay > max_counterparty_selected_contest_delay {
@@ -6587,7 +7047,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                        }
                }
 
-               let destination_script = match signer_provider.get_destination_script() {
+               let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
                        Ok(script) => script,
                        Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
                };
@@ -6617,7 +7077,9 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                                temporary_channel_id: Some(msg.temporary_channel_id),
                                channel_id: msg.temporary_channel_id,
-                               channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
+                               channel_state: ChannelState::NegotiatingFunding(
+                                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+                               ),
                                announcement_sigs_state: AnnouncementSigsState::NotSent,
                                secp_ctx,
 
@@ -6747,7 +7209,10 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                if self.context.is_outbound() {
                        panic!("Tried to send accept_channel for an outbound channel?");
                }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
+               if !matches!(
+                       self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                       if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+               ) {
                        panic!("Tried to send accept_channel after channel had moved forward");
                }
                if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
@@ -6820,14 +7285,17 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
        pub fn funding_created<L: Deref>(
                mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
+       ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
        where
                L::Target: Logger
        {
                if self.context.is_outbound() {
                        return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
                }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+               if !matches!(
+                       self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+                       if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+               ) {
                        // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
                        // remember the channel, so it's safe to just send an error_message here and drop the
                        // channel.
@@ -6872,7 +7340,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // Now that we're past error-generating stuff, update our local state:
 
-               self.context.channel_state = ChannelState::FundingSent as u32;
+               self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
                self.context.channel_id = funding_txo.to_channel_id();
                self.context.cur_counterparty_commitment_transaction_number -= 1;
                self.context.cur_holder_commitment_transaction_number -= 1;
@@ -6892,7 +7360,6 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
                                                          holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number + 1,
@@ -6991,7 +7458,13 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                writer.write_all(&[0; 8])?;
 
                self.context.channel_id.write(writer)?;
-               (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
+               {
+                       let mut channel_state = self.context.channel_state;
+                       if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
+                               channel_state.set_peer_disconnected();
+                       }
+                       channel_state.to_u32().write(writer)?;
+               }
                self.context.channel_value_satoshis.write(writer)?;
 
                self.context.latest_monitor_update_id.write(writer)?;
@@ -7045,9 +7518,10 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 
                let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
                let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
+               let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
 
                (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
-               for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
+               for htlc in self.context.pending_outbound_htlcs.iter() {
                        htlc.htlc_id.write(writer)?;
                        htlc.amount_msat.write(writer)?;
                        htlc.cltv_expiry.write(writer)?;
@@ -7083,23 +7557,20 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                                        reason.write(writer)?;
                                }
                        }
-                       if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
-                               if pending_outbound_skimmed_fees.is_empty() {
-                                       for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
-                               }
-                               pending_outbound_skimmed_fees.push(Some(skimmed_fee));
-                       } else if !pending_outbound_skimmed_fees.is_empty() {
-                               pending_outbound_skimmed_fees.push(None);
-                       }
+                       pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
+                       pending_outbound_blinding_points.push(htlc.blinding_point);
                }
 
                let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
+               let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
+               // Vec of (htlc_id, failure_code, sha256_of_onion)
+               let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
                (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
-               for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
+               for update in self.context.holding_cell_htlc_updates.iter() {
                        match update {
                                &HTLCUpdateAwaitingACK::AddHTLC {
                                        ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
-                                       skimmed_fee_msat,
+                                       blinding_point, skimmed_fee_msat,
                                } => {
                                        0u8.write(writer)?;
                                        amount_msat.write(writer)?;
@@ -7108,12 +7579,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                                        source.write(writer)?;
                                        onion_routing_packet.write(writer)?;
 
-                                       if let Some(skimmed_fee) = skimmed_fee_msat {
-                                               if holding_cell_skimmed_fees.is_empty() {
-                                                       for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
-                                               }
-                                               holding_cell_skimmed_fees.push(Some(skimmed_fee));
-                                       } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
+                                       holding_cell_skimmed_fees.push(skimmed_fee_msat);
+                                       holding_cell_blinding_points.push(blinding_point);
                                },
                                &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
                                        1u8.write(writer)?;
@@ -7125,6 +7592,18 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                                        htlc_id.write(writer)?;
                                        err_packet.write(writer)?;
                                }
+                               &HTLCUpdateAwaitingACK::FailMalformedHTLC {
+                                       htlc_id, failure_code, sha256_of_onion
+                               } => {
+                                       // We don't want to break downgrading by adding a new variant, so write a dummy
+                                       // `::FailHTLC` variant and write the real malformed error as an optional TLV.
+                                       malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
+
+                                       let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+                                       2u8.write(writer)?;
+                                       htlc_id.write(writer)?;
+                                       dummy_err_packet.write(writer)?;
+                               }
                        }
                }
 
@@ -7283,6 +7762,9 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (35, pending_outbound_skimmed_fees, optional_vec),
                        (37, holding_cell_skimmed_fees, optional_vec),
                        (38, self.context.is_batch_funding, option),
+                       (39, pending_outbound_blinding_points, optional_vec),
+                       (41, holding_cell_blinding_points, optional_vec),
+                       (43, malformed_htlcs, optional_vec), // Added in 0.0.119
                });
 
                Ok(())
@@ -7317,7 +7799,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                }
 
                let channel_id = Readable::read(reader)?;
-               let channel_state = Readable::read(reader)?;
+               let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
                let channel_value_satoshis = Readable::read(reader)?;
 
                let latest_monitor_update_id = Readable::read(reader)?;
@@ -7394,6 +7876,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        _ => return Err(DecodeError::InvalidValue),
                                },
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        });
                }
 
@@ -7408,6 +7891,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        source: Readable::read(reader)?,
                                        onion_routing_packet: Readable::read(reader)?,
                                        skimmed_fee_msat: None,
+                                       blinding_point: None,
                                },
                                1 => HTLCUpdateAwaitingACK::ClaimHTLC {
                                        payment_preimage: Readable::read(reader)?,
@@ -7568,6 +8052,11 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let mut is_batch_funding: Option<()> = None;
 
+               let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+               let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+
+               let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -7594,14 +8083,16 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (35, pending_outbound_skimmed_fees_opt, optional_vec),
                        (37, holding_cell_skimmed_fees_opt, optional_vec),
                        (38, is_batch_funding, option),
+                       (39, pending_outbound_blinding_points_opt, optional_vec),
+                       (41, holding_cell_blinding_points_opt, optional_vec),
+                       (43, malformed_htlcs, optional_vec), // Added in 0.0.119
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
                        let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
                        // If we've gotten to the funding stage of the channel, populate the signer with its
                        // required channel parameters.
-                       let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
-                       if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
+                       if channel_state >= ChannelState::FundingNegotiated {
                                holder_signer.provide_channel_parameters(&channel_parameters);
                        }
                        (channel_keys_id, holder_signer)
@@ -7670,6 +8161,40 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        // We expect all skimmed fees to be consumed above
                        if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
                }
+               if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
+                       let mut iter = blinding_pts.into_iter();
+                       for htlc in pending_outbound_htlcs.iter_mut() {
+                               htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+                       }
+                       // We expect all blinding points to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
+               if let Some(blinding_pts) = holding_cell_blinding_points_opt {
+                       let mut iter = blinding_pts.into_iter();
+                       for htlc in holding_cell_htlc_updates.iter_mut() {
+                               if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
+                                       *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
+                               }
+                       }
+                       // We expect all blinding points to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
+
+               if let Some(malformed_htlcs) = malformed_htlcs {
+                       for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
+                               let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
+                                       if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
+                                               let matches = *htlc_id == malformed_htlc_id;
+                                               if matches { debug_assert!(err_packet.data.is_empty()) }
+                                               matches
+                                       } else { false }
+                               }).ok_or(DecodeError::InvalidValue)?;
+                               let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
+                                       htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
+                               };
+                               let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
+                       }
+               }
 
                Ok(Channel {
                        context: ChannelContext {
@@ -7805,13 +8330,15 @@ mod tests {
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
        use bitcoin::blockdata::opcodes;
        use bitcoin::network::constants::Network;
-       use crate::ln::PaymentHash;
+       use crate::ln::onion_utils::INVALID_ONION_BLINDING;
+       use crate::ln::{PaymentHash, PaymentPreimage};
        use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
-use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
+       use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::ln::channel::InitFeatures;
-       use crate::ln::channel::{ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
+       use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
-       use crate::ln::features::ChannelTypeFeatures;
+       use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
+       use crate::ln::msgs;
        use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use crate::ln::script::ShutdownScript;
        use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
@@ -7819,9 +8346,10 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
        use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
        use crate::chain::transaction::OutPoint;
-       use crate::routing::router::Path;
+       use crate::routing::router::{Path, RouteHop};
        use crate::util::config::UserConfig;
        use crate::util::errors::APIError;
+       use crate::util::ser::{ReadableArgs, Writeable};
        use crate::util::test_utils;
        use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
        use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
@@ -7860,19 +8388,21 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        }
 
        impl SignerProvider for Keys {
-               type Signer = InMemorySigner;
+               type EcdsaSigner = InMemorySigner;
+               #[cfg(taproot)]
+               type TaprootSigner = InMemorySigner;
 
                fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                        self.signer.channel_keys_id()
                }
 
-               fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
+               fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
                        self.signer.clone()
                }
 
-               fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
+               fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
 
-               fn get_destination_script(&self) -> Result<ScriptBuf, ()> {
+               fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
                        let secp_ctx = Secp256k1::signing_only();
                        let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
                        let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
@@ -7978,11 +8508,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+               let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
 
                // Put some inbound and outbound HTLCs in A's channel.
                let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
@@ -8007,6 +8538,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                payment_id: PaymentId([42; 32]),
                        },
                        skimmed_fee_msat: None,
+                       blinding_point: None,
                });
 
                // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
@@ -8105,11 +8637,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+               let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
 
                // Now disconnect the two nodes and check that the commitment point in
                // Node B's channel_reestablish message is sane.
@@ -8293,11 +8826,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
+               let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
+               let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
 
                // Make sure that receiving a channel update will update the Channel as expected.
                let update = ChannelUpdate {
@@ -8332,7 +8866,114 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                assert!(!node_a_chan.channel_update(&update).unwrap());
        }
 
-       #[cfg(feature = "_test_vectors")]
+       #[test]
+       fn blinding_point_skimmed_fee_malformed_ser() {
+               // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
+               // properly.
+               let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let config = UserConfig::default();
+               let features = channelmanager::provided_init_features(&config);
+               let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+               let mut chan = Channel { context: outbound_chan.context };
+
+               let dummy_htlc_source = HTLCSource::OutboundRoute {
+                       path: Path {
+                               hops: vec![RouteHop {
+                                       pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
+                                       node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
+                                       cltv_expiry_delta: 0, maybe_announced_channel: false,
+                               }],
+                               blinded_tail: None
+                       },
+                       session_priv: test_utils::privkey(42),
+                       first_hop_htlc_msat: 0,
+                       payment_id: PaymentId([42; 32]),
+               };
+               let dummy_outbound_output = OutboundHTLCOutput {
+                       htlc_id: 0,
+                       amount_msat: 0,
+                       payment_hash: PaymentHash([43; 32]),
+                       cltv_expiry: 0,
+                       state: OutboundHTLCState::Committed,
+                       source: dummy_htlc_source.clone(),
+                       skimmed_fee_msat: None,
+                       blinding_point: None,
+               };
+               let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
+               for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
+                       if idx % 2 == 0 {
+                               htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
+                       }
+                       if idx % 3 == 0 {
+                               htlc.skimmed_fee_msat = Some(1);
+                       }
+               }
+               chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
+
+               let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
+                       amount_msat: 0,
+                       cltv_expiry: 0,
+                       payment_hash: PaymentHash([43; 32]),
+                       source: dummy_htlc_source.clone(),
+                       onion_routing_packet: msgs::OnionPacket {
+                               version: 0,
+                               public_key: Ok(test_utils::pubkey(1)),
+                               hop_data: [0; 20*65],
+                               hmac: [0; 32]
+                       },
+                       skimmed_fee_msat: None,
+                       blinding_point: None,
+               };
+               let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
+                       payment_preimage: PaymentPreimage([42; 32]),
+                       htlc_id: 0,
+               };
+               let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
+                       htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
+               };
+               let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
+                       htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
+               };
+               let mut holding_cell_htlc_updates = Vec::with_capacity(12);
+               for i in 0..12 {
+                       if i % 5 == 0 {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
+                       } else if i % 5 == 1 {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
+                       } else if i % 5 == 2 {
+                               let mut dummy_add = dummy_holding_cell_add_htlc.clone();
+                               if let HTLCUpdateAwaitingACK::AddHTLC {
+                                       ref mut blinding_point, ref mut skimmed_fee_msat, ..
+                               } = &mut dummy_add {
+                                       *blinding_point = Some(test_utils::pubkey(42 + i));
+                                       *skimmed_fee_msat = Some(42);
+                               } else { panic!() }
+                               holding_cell_htlc_updates.push(dummy_add);
+                       } else if i % 5 == 3 {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
+                       } else {
+                               holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
+                       }
+               }
+               chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
+
+               // Encode and decode the channel and ensure that the HTLCs within are the same.
+               let encoded_chan = chan.encode();
+               let mut s = crate::io::Cursor::new(&encoded_chan);
+               let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
+               let features = channelmanager::provided_channel_type_features(&config);
+               let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
+               assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
+               assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
+       }
+
+       #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
        #[test]
        fn outbound_commitment_test() {
                use bitcoin::sighash;
@@ -8341,7 +8982,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                use bitcoin::hashes::hex::FromHex;
                use bitcoin::hash_types::Txid;
                use bitcoin::secp256k1::Message;
-               use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
+               use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
                use crate::ln::PaymentPreimage;
                use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
                use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
@@ -8353,7 +8994,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 
                // Test vectors from BOLT 3 Appendices C and F (anchors):
                let feeest = TestFeeEstimator{fee_est: 15000};
-               let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+               let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
                let secp_ctx = Secp256k1::new();
 
                let mut signer = InMemorySigner::new(
@@ -8581,6 +9222,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
                        out
@@ -8594,6 +9236,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
                        out
@@ -9005,6 +9648,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
                        out
@@ -9018,6 +9662,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
+                               blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
                        out
@@ -9084,7 +9729,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
                           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
        }
-       
+
        #[test]
        fn test_key_derivation() {
                // Test vectors from BOLT 3 Appendix E:
@@ -9363,11 +10008,8 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                                },
                        ]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
-                       tx.clone(),
-                       funding_outpoint,
-                       true,
-                       &&logger,
+               let funding_created_msg = node_a_chan.get_funding_created(
+                       tx.clone(), funding_outpoint, true, &&logger,
                ).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
                        &funding_created_msg.unwrap(),
@@ -9385,12 +10027,10 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
 
                // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
                // broadcasting the funding transaction until the batch is ready.
-               let _ = node_a_chan.funding_signed(
-                       &funding_signed_msg.unwrap(),
-                       best_block,
-                       &&keys_provider,
-                       &&logger,
-               ).unwrap();
+               let res = node_a_chan.funding_signed(
+                       &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
+               );
+               let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
                let node_a_updates = node_a_chan.monitor_updating_restored(
                        &&logger,
                        &&keys_provider,
@@ -9402,11 +10042,7 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                // as the funding transaction depends on all channels in the batch becoming ready.
                assert!(node_a_updates.channel_ready.is_none());
                assert!(node_a_updates.funding_broadcastable.is_none());
-               assert_eq!(
-                       node_a_chan.context.channel_state,
-                       ChannelState::FundingSent as u32 |
-                       ChannelState::WaitingForBatch as u32,
-               );
+               assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
 
                // It is possible to receive a 0conf channel_ready from the remote node.
                node_a_chan.channel_ready(
@@ -9419,18 +10055,12 @@ use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
                ).unwrap();
                assert_eq!(
                        node_a_chan.context.channel_state,
-                       ChannelState::FundingSent as u32 |
-                       ChannelState::WaitingForBatch as u32 |
-                       ChannelState::TheirChannelReady as u32,
+                       ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
                );
 
                // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
                node_a_chan.set_batch_ready();
-               assert_eq!(
-                       node_a_chan.context.channel_state,
-                       ChannelState::FundingSent as u32 |
-                       ChannelState::TheirChannelReady as u32,
-               );
+               assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
                assert!(node_a_chan.check_get_channel_ready(0).is_some());
        }
 }
index 3f6277b44bbfc57cf38438b74540b0c295f187bf..e04a3cd1a34bdfda679c677e3a09518be526132d 100644 (file)
@@ -36,23 +36,22 @@ use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::events;
 use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
+use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
-use crate::ln::onion_utils::HTLCFailReason;
+use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 #[cfg(test)]
 use crate::ln::outbound_payment;
@@ -64,15 +63,23 @@ use crate::offers::merkle::SignError;
 use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
+use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
+use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::string::UntrustedString;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
-use crate::util::logger::{Level, Logger};
+use crate::util::logger::{Level, Logger, WithContext};
 use crate::util::errors::APIError;
+#[cfg(not(c_bindings))]
+use {
+       crate::routing::router::DefaultRouter,
+       crate::routing::gossip::NetworkGraph,
+       crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
+       crate::sign::KeysManager,
+};
 
 use alloc::collections::{btree_map, BTreeMap};
 
@@ -107,62 +114,150 @@ use crate::ln::script::ShutdownScript;
 // Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
 // our payment, which we can use to decode errors or inform the user that the payment was sent.
 
-/// Routing info for an inbound HTLC onion.
+/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
 pub enum PendingHTLCRouting {
-       /// A forwarded HTLC.
+       /// An HTLC which should be forwarded on to another node.
        Forward {
-               /// BOLT 4 onion packet.
+               /// The onion which should be included in the forwarded HTLC, telling the next hop what to
+               /// do with the HTLC.
                onion_packet: msgs::OnionPacket,
-               /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
-               /// generated using `get_fake_scid` from the scid_utils::fake_scid module.
+               /// The short channel ID of the channel which we were instructed to forward this HTLC to.
+               ///
+               /// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
+               /// to the receiving node, such as one returned from
+               /// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
                short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
+               /// Set if this HTLC is being forwarded within a blinded path.
+               blinded: Option<BlindedForward>,
        },
-       /// An HTLC paid to an invoice we generated.
+       /// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
+       ///
+       /// Note that at this point, we have not checked that the invoice being paid was actually
+       /// generated by us, but rather it's claiming to pay an invoice of ours.
        Receive {
-               /// Payment secret and total msat received.
+               /// Information about the amount the sender intended to pay and (potential) proof that this
+               /// is a payment for an invoice we generated. This proof of payment is is also used for
+               /// linking MPP parts of a larger payment.
                payment_data: msgs::FinalOnionHopData,
-               /// See [`RecipientOnionFields::payment_metadata`] for more info.
+               /// Additional data which we (allegedly) instructed the sender to include in the onion.
+               ///
+               /// For HTLCs received by LDK, this will ultimately be exposed in
+               /// [`Event::PaymentClaimable::onion_fields`] as
+               /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
+               /// CLTV expiry of the received HTLC.
+               ///
                /// Used to track when we should expire pending HTLCs that go unclaimed.
                incoming_cltv_expiry: u32,
-               /// Optional shared secret for phantom node.
+               /// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
+               /// provide the onion shared secret used to decrypt the next level of forwarding
+               /// instructions.
                phantom_shared_secret: Option<[u8; 32]>,
-               /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+               /// Custom TLVs which were set by the sender.
+               ///
+               /// For HTLCs received by LDK, this will ultimately be exposed in
+               /// [`Event::PaymentClaimable::onion_fields`] as
+               /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
+               /// Set if this HTLC is the final hop in a multi-hop blinded path.
+               requires_blinded_error: bool,
        },
-       /// Incoming keysend (sender provided the preimage in a TLV).
+       /// The onion indicates that this is for payment to us but which contains the preimage for
+       /// claiming included, and is unrelated to any invoice we'd previously generated (aka a
+       /// "keysend" or "spontaneous" payment).
        ReceiveKeysend {
-               /// This was added in 0.0.116 and will break deserialization on downgrades.
+               /// Information about the amount the sender intended to pay and possibly a token to
+               /// associate MPP parts of a larger payment.
+               ///
+               /// This will only be filled in if receiving MPP keysend payments is enabled, and it being
+               /// present will cause deserialization to fail on versions of LDK prior to 0.0.116.
                payment_data: Option<msgs::FinalOnionHopData>,
-               /// Preimage for this onion payment.
+               /// Preimage for this onion payment. This preimage is provided by the sender and will be
+               /// used to settle the spontaneous payment.
                payment_preimage: PaymentPreimage,
-               /// See [`RecipientOnionFields::payment_metadata`] for more info.
+               /// Additional data which we (allegedly) instructed the sender to include in the onion.
+               ///
+               /// For HTLCs received by LDK, this will ultimately bubble back up as
+               /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
-               /// CLTV expiry of the incoming HTLC.
-               incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
-               /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+               /// CLTV expiry of the received HTLC.
+               ///
+               /// Used to track when we should expire pending HTLCs that go unclaimed.
+               incoming_cltv_expiry: u32,
+               /// Custom TLVs which were set by the sender.
+               ///
+               /// For HTLCs received by LDK, these will ultimately bubble back up as
+               /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
        },
 }
 
-/// Full details of an incoming HTLC, including routing info.
+/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+pub struct BlindedForward {
+       /// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
+       /// onion payload if we're the introduction node. Useful for calculating the next hop's
+       /// [`msgs::UpdateAddHTLC::blinding_point`].
+       pub inbound_blinding_point: PublicKey,
+       // Another field will be added here when we support forwarding as a non-intro node.
+}
+
+impl PendingHTLCRouting {
+       // Used to override the onion failure code and data if the HTLC is blinded.
+       fn blinded_failure(&self) -> Option<BlindedFailure> {
+               // TODO: needs update when we support forwarding blinded HTLCs as non-intro node
+               match self {
+                       Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode),
+                       Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+                       _ => None,
+               }
+       }
+}
+
+/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
+/// should go next.
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
 pub struct PendingHTLCInfo {
        /// Further routing details based on whether the HTLC is being forwarded or received.
        pub routing: PendingHTLCRouting,
-       /// Shared secret from the previous hop.
+       /// The onion shared secret we build with the sender used to decrypt the onion.
+       ///
+       /// This is later used to encrypt failure packets in the event that the HTLC is failed.
        pub incoming_shared_secret: [u8; 32],
-       payment_hash: PaymentHash,
-       /// Amount received
-       pub incoming_amt_msat: Option<u64>, // Added in 0.0.113
-       /// Sender intended amount to forward or receive (actual amount received
-       /// may overshoot this in either case)
+       /// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
+       pub payment_hash: PaymentHash,
+       /// Amount received in the incoming HTLC.
+       ///
+       /// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
+       /// versions.
+       pub incoming_amt_msat: Option<u64>,
+       /// The amount the sender indicated should be forwarded on to the next hop or amount the sender
+       /// intended for us to receive for received payments.
+       ///
+       /// If the received amount is less than this for received payments, an intermediary hop has
+       /// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
+       /// it along another path).
+       ///
+       /// Because nodes can take less than their required fees, and because senders may wish to
+       /// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
+       /// received payments. In such cases, recipients must handle this HTLC as if it had received
+       /// [`Self::outgoing_amt_msat`].
        pub outgoing_amt_msat: u64,
-       /// Outgoing CLTV height.
+       /// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
+       /// should have been set on the received HTLC for received payments).
        pub outgoing_cltv_value: u32,
-       /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
-       /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
+       /// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
+       ///
+       /// If this is a payment for forwarding, this is the fee we are taking before forwarding the
+       /// HTLC.
+       ///
+       /// If this is a received payment, this is the fee that our counterparty took.
+       ///
+       /// This is used to allow LSPs to take fees as a part of payments, without the sender having to
+       /// shoulder them.
        pub skimmed_fee_msat: Option<u64>,
 }
 
@@ -179,6 +274,7 @@ pub(super) enum PendingHTLCStatus {
        Fail(HTLCFailureMsg),
 }
 
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 pub(super) struct PendingAddHTLCInfo {
        pub(super) forward_info: PendingHTLCInfo,
 
@@ -194,12 +290,25 @@ pub(super) struct PendingAddHTLCInfo {
        prev_user_channel_id: u128,
 }
 
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 pub(super) enum HTLCForwardInfo {
        AddHTLC(PendingAddHTLCInfo),
        FailHTLC {
                htlc_id: u64,
                err_packet: msgs::OnionErrorPacket,
        },
+       FailMalformedHTLC {
+               htlc_id: u64,
+               failure_code: u16,
+               sha256_of_onion: [u8; 32],
+       },
+}
+
+// Used for failing blinded HTLCs backwards correctly.
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+enum BlindedFailure {
+       FromIntroductionNode,
+       FromBlindedNode,
 }
 
 /// Tracks the inbound corresponding to an outbound HTLC
@@ -211,6 +320,7 @@ pub(crate) struct HTLCPreviousHopData {
        htlc_id: u64,
        incoming_packet_shared_secret: [u8; 32],
        phantom_shared_secret: Option<[u8; 32]>,
+       blinded_failure: Option<BlindedFailure>,
 
        // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
        // channel with a preimage provided by the forward channel.
@@ -396,16 +506,6 @@ impl HTLCSource {
        }
 }
 
-/// Invalid inbound onion payment.
-pub struct InboundOnionErr {
-       /// BOLT 4 error code.
-       pub err_code: u16,
-       /// Data attached to this error.
-       pub err_data: Vec<u8>,
-       /// Error message text.
-       pub msg: &'static str,
-}
-
 /// This enum is used to specify which error data to send to peers when failing back an HTLC
 /// using [`ChannelManager::fail_htlc_backwards_with_reason`].
 ///
@@ -922,7 +1022,7 @@ pub trait AChannelManager {
        /// A type implementing [`WriteableEcdsaChannelSigner`].
        type Signer: WriteableEcdsaChannelSigner + Sized;
        /// A type implementing [`SignerProvider`] for [`Self::Signer`].
-       type SignerProvider: SignerProvider<Signer = Self::Signer> + ?Sized;
+       type SignerProvider: SignerProvider<EcdsaSigner= Self::Signer> + ?Sized;
        /// A type that may be dereferenced to [`Self::SignerProvider`].
        type SP: Deref<Target = Self::SignerProvider>;
        /// A type implementing [`FeeEstimator`].
@@ -944,7 +1044,7 @@ pub trait AChannelManager {
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
 for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -961,7 +1061,7 @@ where
        type ES = ES;
        type NodeSigner = NS::Target;
        type NS = NS;
-       type Signer = <SP::Target as SignerProvider>::Signer;
+       type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
        type SignerProvider = SP::Target;
        type SP = SP;
        type FeeEstimator = F::Target;
@@ -1055,7 +1155,7 @@ where
 //              |
 //              |__`peer_state`
 //                  |
-//                  |__`id_to_peer`
+//                  |__`outpoint_to_peer`
 //                  |
 //                  |__`short_to_chan_info`
 //                  |
@@ -1069,7 +1169,7 @@ where
 //
 pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -1149,11 +1249,7 @@ where
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        outbound_scid_aliases: Mutex<HashSet<u64>>,
 
-       /// `channel_id` -> `counterparty_node_id`.
-       ///
-       /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
-       /// multiple channels with the same `temporary_channel_id` to different peers can exist,
-       /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+       /// Channel funding outpoint -> `counterparty_node_id`.
        ///
        /// Note that this map should only be used for `MonitorEvent` handling, to be able to access
        /// the corresponding channel for the event, as we only have access to the `channel_id` during
@@ -1171,7 +1267,10 @@ where
        /// required to access the channel with the `counterparty_node_id`.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
-       id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
+       #[cfg(not(test))]
+       outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
+       #[cfg(test)]
+       pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
 
        /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
        ///
@@ -1437,13 +1536,11 @@ pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
 // then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
 // failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
 // LATENCY_GRACE_PERIOD_BLOCKS.
-#[deny(const_err)]
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
 // ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[deny(const_err)]
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
@@ -1884,7 +1981,10 @@ macro_rules! handle_error {
                                        }
                                }
 
-                               log_error!($self.logger, "{}", err.err);
+                               let logger = WithContext::from(
+                                       &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
+                               );
+                               log_error!(logger, "{}", err.err);
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
                                        msg_events.push(events::MessageSendEvent::HandleError {
@@ -1906,20 +2006,13 @@ macro_rules! handle_error {
                        },
                }
        } };
-       ($self: ident, $internal: expr) => {
-               match $internal {
-                       Ok(res) => Ok(res),
-                       Err((chan, msg_handle_err)) => {
-                               let counterparty_node_id = chan.get_counterparty_node_id();
-                               handle_error!($self, Err(msg_handle_err), counterparty_node_id).map_err(|err| (chan, err))
-                       },
-               }
-       };
 }
 
 macro_rules! update_maps_on_chan_removal {
        ($self: expr, $channel_context: expr) => {{
-               $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
+               if let Some(outpoint) = $channel_context.get_funding_txo() {
+                       $self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
+               }
                let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
                if let Some(short_id) = $channel_context.get_short_channel_id() {
                        short_to_chan_info.remove(&short_id);
@@ -1948,7 +2041,8 @@ macro_rules! convert_chan_phase_err {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
                        },
                        ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
+                               let logger = WithChannelContext::from(&$self.logger, &$channel.context);
+                               log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
                                update_maps_on_chan_removal!($self, $channel.context);
                                let shutdown_res = $channel.context.force_shutdown(true);
                                let user_id = $channel.context.get_user_id();
@@ -2074,7 +2168,8 @@ macro_rules! emit_channel_ready_event {
 
 macro_rules! handle_monitor_update_completion {
        ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
-               let mut updates = $chan.monitor_updating_restored(&$self.logger,
+               let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+               let mut updates = $chan.monitor_updating_restored(&&logger,
                        &$self.node_signer, $self.chain_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
                let counterparty_node_id = $chan.context.get_counterparty_node_id();
@@ -2169,14 +2264,15 @@ macro_rules! handle_monitor_update_completion {
 macro_rules! handle_new_monitor_update {
        ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
                debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
+               let logger = WithChannelContext::from(&$self.logger, &$chan.context);
                match $update_res {
                        ChannelMonitorUpdateStatus::UnrecoverableError => {
                                let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-                               log_error!($self.logger, "{}", err_str);
+                               log_error!(logger, "{}", err_str);
                                panic!("{}", err_str);
                        },
                        ChannelMonitorUpdateStatus::InProgress => {
-                               log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+                               log_debug!(logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
                                        &$chan.context.channel_id());
                                false
                        },
@@ -2284,7 +2380,7 @@ macro_rules! process_events_body {
 
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -2335,7 +2431,7 @@ where
                        forward_htlcs: Mutex::new(HashMap::new()),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
                        pending_intercepted_htlcs: Mutex::new(HashMap::new()),
-                       id_to_peer: Mutex::new(HashMap::new()),
+                       outpoint_to_peer: Mutex::new(HashMap::new()),
                        short_to_chan_info: FairRwLock::new(HashMap::new()),
 
                        our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
@@ -2486,7 +2582,7 @@ where
        fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
                // Allocate our best estimate of the number of channels we have in the `res`
                // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
-               // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+               // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
                // of the ChannelMonitor handling. Therefore reallocations may still occur, but is
                // unlikely as the `short_to_chan_info` map often contains 2 entries for
                // the same channel.
@@ -2519,7 +2615,7 @@ where
        pub fn list_channels(&self) -> Vec<ChannelDetails> {
                // Allocate our best estimate of the number of channels we have in the `res`
                // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
-               // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+               // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
                // of the ChannelMonitor handling. Therefore reallocations may still occur, but is
                // unlikely as the `short_to_chan_info` map often contains 2 entries for
                // the same channel.
@@ -2633,9 +2729,10 @@ where
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
-               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-               let shutdown_result;
-               loop {
+               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
+               let mut shutdown_result = None;
+
+               {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -2649,11 +2746,9 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let their_features = &peer_state.latest_features;
-                                               let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
+                                               let (shutdown_msg, mut monitor_update_opt, htlcs) =
                                                        chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
                                                failed_htlcs = htlcs;
-                                               shutdown_result = local_shutdown_result;
-                                               debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 
                                                // We can send the `shutdown` message before updating the `ChannelMonitor`
                                                // here as we don't need the monitor update to complete until we send a
@@ -2670,30 +2765,20 @@ where
                                                if let Some(monitor_update) = monitor_update_opt.take() {
                                                        handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
-                                                       break;
-                                               }
-
-                                               if chan.is_shutdown() {
-                                                       if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                               msg: channel_update
-                                                                       });
-                                                               }
-                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-                                                       }
                                                }
-                                               break;
+                                       } else {
+                                               self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
+                                               let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
-                                       // If we reach this point, it means that the channel_id either refers to an unfunded channel or
-                                       // it does not exist for this peer. Either way, we can attempt to force-close it.
-                                       //
-                                       // An appropriate error will be returned for non-existence of the channel if that's the case.
-                                       mem::drop(peer_state_lock);
-                                       mem::drop(per_peer_state);
-                                       return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+                                       return Err(APIError::ChannelUnavailable {
+                                               err: format!(
+                                                       "Channel with id {} not found for the passed counterparty node_id {}",
+                                                       channel_id, counterparty_node_id,
+                                               )
+                                       });
                                },
                        }
                }
@@ -2778,7 +2863,10 @@ where
                        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
                }
 
-               log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+               let logger = WithContext::from(
+                       &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
+               );
+               log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
                for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
@@ -2833,8 +2921,9 @@ where
                        } else {
                                ClosureReason::HolderForceClosed
                        };
+                       let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
                        if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", channel_id);
+                               log_error!(logger, "Force-closing channel {}", channel_id);
                                self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
                                mem::drop(peer_state);
@@ -2851,7 +2940,7 @@ where
                                        },
                                }
                        } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
-                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
+                               log_error!(logger, "Force-closing channel {}", &channel_id);
                                // N.B. that we don't send any channel close event here: we
                                // don't have a user_channel_id, and we never sent any opening
                                // events anyway.
@@ -2936,7 +3025,7 @@ where
        }
 
        fn decode_update_add_htlc_onion(
-               &self, msg: &msgs::UpdateAddHTLC
+               &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
        ) -> Result<
                (onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
        > {
@@ -2944,14 +3033,38 @@ where
                        msg, &self.node_signer, &self.logger, &self.secp_ctx
                )?;
 
+               let is_intro_node_forward = match next_hop {
+                       onion_utils::Hop::Forward {
+                               // TODO: update this when we support blinded forwarding as non-intro node
+                               next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+                       } => true,
+                       _ => false,
+               };
+
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
-                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                       log_info!(
+                                               WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
+                                               "Failed to accept/forward incoming HTLC: {}", $msg
+                                       );
+                                       // If `msg.blinding_point` is set, we must always fail with malformed.
+                                       if msg.blinding_point.is_some() {
+                                               return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                                       channel_id: msg.channel_id,
+                                                       htlc_id: msg.htlc_id,
+                                                       sha256_of_onion: [0; 32],
+                                                       failure_code: INVALID_ONION_BLINDING,
+                                               }));
+                                       }
+
+                                       let (err_code, err_data) = if is_intro_node_forward {
+                                               (INVALID_ONION_BLINDING, &[0; 32][..])
+                                       } else { ($err_code, $data) };
                                        return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
-                                               reason: HTLCFailReason::reason($err_code, $data.to_vec())
+                                               reason: HTLCFailReason::reason(err_code, err_data.to_vec())
                                                        .get_encrypted_failure_packet(&shared_secret, &None),
                                        }));
                                }
@@ -3089,13 +3202,25 @@ where
        }
 
        fn construct_pending_htlc_status<'a>(
-               &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
-               allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+               &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
+               decoded_hop: onion_utils::Hop, allow_underpay: bool,
+               next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
        ) -> PendingHTLCStatus {
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
-                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+                                       log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                       if msg.blinding_point.is_some() {
+                                               return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+                                                       msgs::UpdateFailMalformedHTLC {
+                                                               channel_id: msg.channel_id,
+                                                               htlc_id: msg.htlc_id,
+                                                               sha256_of_onion: [0; 32],
+                                                               failure_code: INVALID_ONION_BLINDING,
+                                                       }
+                                               ))
+                                       }
                                        return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
@@ -3153,7 +3278,8 @@ where
                if chan.context.get_short_channel_id().is_none() {
                        return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
                }
-               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
                self.get_channel_update_for_unicast(chan)
        }
 
@@ -3169,7 +3295,8 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Attempting to generate channel update for channel {}", &chan.context.channel_id());
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
                let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -3179,7 +3306,8 @@ where
        }
 
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Generating channel update for channel {}", &chan.context.channel_id());
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
                let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
                let enabled = chan.context.is_usable() && match chan.channel_update_status() {
@@ -3229,24 +3357,33 @@ where
                } = args;
                // The top-level caller should hold the total_consistency_lock read lock.
                debug_assert!(self.total_consistency_lock.try_write().is_err());
-
-               log_trace!(self.logger,
-                       "Attempting to send payment with payment hash {} along path with next hop {}",
-                       payment_hash, path.hops.first().unwrap().short_channel_id);
                let prng_seed = self.entropy_source.get_secure_random_bytes();
                let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
 
                let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
                        &self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
                        payment_hash, keysend_preimage, prng_seed
-               )?;
+               ).map_err(|e| {
+                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                       log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
+                       e
+               })?;
 
                let err: Result<(), _> = loop {
                        let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
-                               None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
+                               None => {
+                                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                                       log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
+                                       return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
+                               },
                                Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        };
 
+                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+                       log_trace!(logger,
+                               "Attempting to send payment with payment hash {} along path with next hop {}",
+                               payment_hash, path.hops.first().unwrap().short_channel_id);
+
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
                                .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
@@ -3259,13 +3396,14 @@ where
                                                        return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
                                                }
                                                let funding_txo = chan.context.get_funding_txo().unwrap();
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
                                                        htlc_cltv, HTLCSource::OutboundRoute {
                                                                path: path.clone(),
                                                                session_priv: session_priv.clone(),
                                                                first_hop_htlc_msat: htlc_msat,
                                                                payment_id,
-                                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
+                                                       }, onion_packet, None, &self.fee_estimator, &&logger);
                                                match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
                                                        Some(monitor_update) => {
                                                                match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
@@ -3295,7 +3433,6 @@ where
                        }
                        return Ok(());
                };
-
                match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
                        Ok(_) => unreachable!(),
                        Err(e) => {
@@ -3617,11 +3754,13 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
+               let funding_txo;
                let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
-                       Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
-                               let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+                       Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
+                               funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
-                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger)
+                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
                                                let user_id = chan.context.get_user_id();
@@ -3630,11 +3769,10 @@ where
                                                (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
                                        } else { unreachable!(); });
                                match funding_res {
-                                       Ok((chan, funding_msg)) => (chan, funding_msg),
+                                       Ok(funding_msg) => (chan, funding_msg),
                                        Err((chan, err)) => {
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
-
                                                let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
                                                return Err(APIError::ChannelUnavailable {
                                                        err: "Signer refused to sign the initial commitment transaction".to_owned()
@@ -3667,11 +3805,11 @@ where
                                panic!("Generated duplicate funding txid?");
                        },
                        hash_map::Entry::Vacant(e) => {
-                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                               if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
-                                       panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+                               let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+                               if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
+                                       panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
                                }
-                               e.insert(ChannelPhase::Funded(chan));
+                               e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
                }
                Ok(())
@@ -3998,7 +4136,8 @@ where
                                None => {
                                        let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
                                                next_hop_channel_id, next_node_id);
-                                       log_error!(self.logger, "{} when attempting to forward intercepted HTLC", error);
+                                       let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+                                       log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
                                        return Err(APIError::ChannelUnavailable {
                                                err: error
                                        })
@@ -4012,8 +4151,10 @@ where
                        })?;
 
                let routing = match payment.forward_info.routing {
-                       PendingHTLCRouting::Forward { onion_packet, .. } => {
-                               PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid }
+                       PendingHTLCRouting::Forward { onion_packet, blinded, .. } => {
+                               PendingHTLCRouting::Forward {
+                                       onion_packet, blinded, short_channel_id: next_hop_scid
+                               }
                        },
                        _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
                };
@@ -4057,6 +4198,7 @@ where
                                htlc_id: payment.prev_htlc_id,
                                incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
                                phantom_shared_secret: None,
+                               blinded_failure: payment.forward_info.routing.blinded_failure(),
                        });
 
                        let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
@@ -4083,6 +4225,7 @@ where
 
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
                                if short_chan_id != 0 {
+                                       let mut forwarding_counterparty = None;
                                        macro_rules! forwarding_channel_not_found {
                                                () => {
                                                        for forward_info in pending_forwards.drain(..) {
@@ -4096,7 +4239,8 @@ where
                                                                        }) => {
                                                                                macro_rules! failure_handler {
                                                                                        ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
-                                                                                               log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+                                                                                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
 
                                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                        short_channel_id: prev_short_channel_id,
@@ -4105,6 +4249,7 @@ where
                                                                                                        htlc_id: prev_htlc_id,
                                                                                                        incoming_packet_shared_secret: incoming_shared_secret,
                                                                                                        phantom_shared_secret: $phantom_ss,
+                                                                                                       blinded_failure: routing.blinded_failure(),
                                                                                                });
 
                                                                                                let reason = if $next_hop_unknown {
@@ -4134,13 +4279,13 @@ where
                                                                                                }
                                                                                        }
                                                                                }
-                                                                               if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
+                                                                               if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
                                                                                        let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
                                                                                        if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
                                                                                                let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
                                                                                                let next_hop = match onion_utils::decode_next_payment_hop(
                                                                                                        phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
-                                                                                                       payment_hash, &self.node_signer
+                                                                                                       payment_hash, None, &self.node_signer
                                                                                                ) {
                                                                                                        Ok(res) => res,
                                                                                                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
@@ -4176,7 +4321,7 @@ where
                                                                                        fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
                                                                                }
                                                                        },
-                                                                       HTLCForwardInfo::FailHTLC { .. } => {
+                                                                       HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
                                                                                // Channel went away before we could fail it. This implies
                                                                                // the channel is now on chain and our counterparty is
                                                                                // trying to broadcast the HTLC-Timeout, but that's their
@@ -4194,6 +4339,7 @@ where
                                                        continue;
                                                }
                                        };
+                                       forwarding_counterparty = Some(counterparty_node_id);
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                        let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
                                        if peer_state_mutex_opt.is_none() {
@@ -4203,16 +4349,19 @@ where
                                        let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                        let peer_state = &mut *peer_state_lock;
                                        if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                for forward_info in pending_forwards.drain(..) {
                                                        match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                        prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                        forward_info: PendingHTLCInfo {
                                                                                incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
-                                                                               routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
+                                                                               routing: PendingHTLCRouting::Forward {
+                                                                                       onion_packet, blinded, ..
+                                                                               }, skimmed_fee_msat, ..
                                                                        },
                                                                }) => {
-                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
+                                                                       log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
@@ -4221,14 +4370,23 @@ where
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                // Phantom payments are only PendingHTLCRouting::Receive.
                                                                                phantom_shared_secret: None,
+                                                                               blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+                                                                       });
+                                                                       let next_blinding_point = blinded.and_then(|b| {
+                                                                               let encrypted_tlvs_ss = self.node_signer.ecdh(
+                                                                                       Recipient::Node, &b.inbound_blinding_point, None
+                                                                               ).unwrap().secret_bytes();
+                                                                               onion_utils::next_hop_pubkey(
+                                                                                       &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+                                                                               ).ok()
                                                                        });
                                                                        if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
                                                                                payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                               onion_packet, skimmed_fee_msat, &self.fee_estimator,
-                                                                               &self.logger)
+                                                                               onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+                                                                               &&logger)
                                                                        {
                                                                                if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+                                                                                       log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
                                                                                } else {
                                                                                        panic!("Stated return value requirements in send_htlc() were not met");
                                                                                }
@@ -4244,12 +4402,12 @@ where
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
                                                                        if let Err(e) = chan.queue_fail_htlc(
-                                                                               htlc_id, err_packet, &self.logger
+                                                                               htlc_id, err_packet, &&logger
                                                                        ) {
                                                                                if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                                       log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
                                                                                } else {
                                                                                        panic!("Stated return value requirements in queue_fail_htlc() were not met");
                                                                                }
@@ -4259,6 +4417,20 @@ where
                                                                                continue;
                                                                        }
                                                                },
+                                                               HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+                                                                       // Use the channel-scoped `logger` (WithChannelContext) like the
+                                                                       // sibling AddHTLC/FailHTLC arms above, so the channel id is
+                                                                       // attached to these log lines as well.
+                                                                       log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &&logger) {
+                                                                               if let ChannelError::Ignore(msg) = e {
+                                                                                       log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                               } else {
+                                                                                       panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
+                                                                               }
+                                                                               // fail-backs are best-effort, we probably already have one
+                                                                               // pending; if so that's OK, and if not, the channel is on
+                                                                               // the chain and sending the HTLC-Timeout is their problem.
+                                                                               continue;
+                                                                       }
+                                                               },
                                                        }
                                                }
                                        } else {
@@ -4275,8 +4447,12 @@ where
                                                                        skimmed_fee_msat, ..
                                                                }
                                                        }) => {
+                                                               let blinded_failure = routing.blinded_failure();
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
-                                                                       PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => {
+                                                                       PendingHTLCRouting::Receive {
+                                                                               payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
+                                                                               custom_tlvs, requires_blinded_error: _
+                                                                       } => {
                                                                                let _legacy_hop_data = Some(payment_data.clone());
                                                                                let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
                                                                                                payment_metadata, custom_tlvs };
@@ -4304,6 +4480,7 @@ where
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                phantom_shared_secret,
+                                                                               blinded_failure,
                                                                        },
                                                                        // We differentiate the received value from the sender intended value
                                                                        // if possible so that we don't prematurely mark MPP payments complete
@@ -4334,6 +4511,7 @@ where
                                                                                                htlc_id: $htlc.prev_hop.htlc_id,
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                                                phantom_shared_secret,
+                                                                                               blinded_failure,
                                                                                        }), payment_hash,
                                                                                        HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
                                                                                        HTLCDestination::FailedPayment { payment_hash: $payment_hash },
@@ -4507,7 +4685,7 @@ where
                                                                        },
                                                                };
                                                        },
-                                                       HTLCForwardInfo::FailHTLC { .. } => {
+                                                       HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
                                                                panic!("Got pending fail of our own HTLC");
                                                        }
                                                }
@@ -4614,23 +4792,26 @@ where
 
        fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
                if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
+
+               let logger = WithChannelContext::from(&self.logger, &chan.context);
+
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
                        if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
-                               log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+                               log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
                                chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        }
                        return NotifyOption::SkipPersistNoEvents;
                }
                if !chan.context.is_live() {
-                       log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+                       log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
                                chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersistNoEvents;
                }
-               log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+               log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
                        &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 
-               chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
+               chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
                NotifyOption::DoPersist
        }
 
@@ -4709,7 +4890,8 @@ where
                        | {
                                context.maybe_expire_prev_config();
                                if unfunded_context.should_expire_unfunded_channel() {
-                                       log_error!(self.logger,
+                                       let logger = WithChannelContext::from(&self.logger, context);
+                                       log_error!(logger,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
                                        self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
@@ -4794,7 +4976,8 @@ where
                                                                chan.context.maybe_expire_prev_config();
 
                                                                if chan.should_disconnect_peer_awaiting_response() {
-                                                                       log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+                                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                                       log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
                                                                                        counterparty_node_id, chan_id);
                                                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                                                node_id: counterparty_node_id,
@@ -4822,7 +5005,8 @@ where
 
                                        for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
                                                if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
-                                                       log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
+                                                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id));
+                                                       log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
                                                        peer_state.pending_msg_events.push(
                                                                events::MessageSendEvent::HandleError {
                                                                        node_id: counterparty_node_id,
@@ -5097,9 +5281,37 @@ where
                                        &self.pending_events, &self.logger)
                                { self.push_pending_forwards_ev(); }
                        },
-                       HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint, .. }) => {
-                               log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", &payment_hash, onion_error);
-                               let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
+                       HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                               ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
+                               ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+                       }) => {
+                               log_trace!(
+                                       WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+                                       "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
+                                       if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
+                               );
+                               let failure = match blinded_failure {
+                                       Some(BlindedFailure::FromIntroductionNode) => {
+                                               let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
+                                               let err_packet = blinded_onion_error.get_encrypted_failure_packet(
+                                                       incoming_packet_shared_secret, phantom_shared_secret
+                                               );
+                                               HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
+                                       },
+                                       Some(BlindedFailure::FromBlindedNode) => {
+                                               HTLCForwardInfo::FailMalformedHTLC {
+                                                       htlc_id: *htlc_id,
+                                                       failure_code: INVALID_ONION_BLINDING,
+                                                       sha256_of_onion: [0; 32]
+                                               }
+                                       },
+                                       None => {
+                                               let err_packet = onion_error.get_encrypted_failure_packet(
+                                                       incoming_packet_shared_secret, phantom_shared_secret
+                                               );
+                                               HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
+                                       }
+                               };
 
                                let mut push_forward_ev = false;
                                let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
@@ -5108,10 +5320,10 @@ where
                                }
                                match forward_htlcs.entry(*short_channel_id) {
                                        hash_map::Entry::Occupied(mut entry) => {
-                                               entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
+                                               entry.get_mut().push(failure);
                                        },
                                        hash_map::Entry::Vacant(entry) => {
-                                               entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }));
+                                               entry.insert(vec!(failure));
                                        }
                                }
                                mem::drop(forward_htlcs);
@@ -5259,6 +5471,7 @@ where
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
+                               let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
                                if let Err((pk, err)) = self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
@@ -5269,7 +5482,8 @@ where
                                        if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                // We got a temporary failure updating monitor, but will claim the
                                                // HTLC when the monitor updating is restored (or on chain).
-                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+                                               let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
+                                               log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                        } else { errs.push((pk, err)); }
                                }
                        }
@@ -5327,12 +5541,13 @@ where
                                if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
-                                               let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
 
                                                match fulfill_res {
                                                        UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
                                                                if let Some(action) = completion_action(Some(htlc_value_msat), false) {
-                                                                       log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                                                                       log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
                                                                                chan_id, action);
                                                                        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                                }
@@ -5359,7 +5574,7 @@ where
                                                                };
                                                                mem::drop(peer_state_lock);
 
-                                                               log_trace!(self.logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
+                                                               log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
                                                                        chan_id, action);
                                                                let (node_id, funding_outpoint, blocker) =
                                                                if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
@@ -5402,6 +5617,7 @@ where
                }
                let preimage_update = ChannelMonitorUpdate {
                        update_id: CLOSED_CHANNEL_UPDATE_ID,
+                       counterparty_node_id: None,
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage,
                        }],
@@ -5416,7 +5632,7 @@ where
                                // with a preimage we *must* somehow manage to propagate it to the upstream
                                // channel, or we must have an ability to receive the same event and try
                                // again on restart.
-                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
                                        payment_preimage, update_res);
                        }
                } else {
@@ -5628,7 +5844,8 @@ where
                pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
        -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
-               log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+               let logger = WithChannelContext::from(&self.logger, &channel.context);
+               log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
                        &channel.context.channel_id(),
                        if raa.is_some() { "an" } else { "no" },
                        if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
@@ -5682,7 +5899,7 @@ where
                }
 
                if let Some(tx) = funding_broadcastable {
-                       log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+                       log_info!(logger, "Broadcasting funding transaction with txid {}", tx.txid());
                        self.tx_broadcaster.broadcast_transactions(&[&tx]);
                }
 
@@ -5702,9 +5919,9 @@ where
                        Some(cp_id) => cp_id.clone(),
                        None => {
                                // TODO: Once we can rely on the counterparty_node_id from the
-                               // monitor event, this and the id_to_peer map should be removed.
-                               let id_to_peer = self.id_to_peer.lock().unwrap();
-                               match id_to_peer.get(&funding_txo.to_channel_id()) {
+                               // monitor event, this and the outpoint_to_peer map should be removed.
+                               let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+                               match outpoint_to_peer.get(&funding_txo) {
                                        Some(cp_id) => cp_id.clone(),
                                        None => return,
                                }
@@ -5732,7 +5949,8 @@ where
                                pending.retain(|upd| upd.update_id > highest_applied_update_id);
                                pending.len()
                        } else { 0 };
-               log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
+               let logger = WithChannelContext::from(&self.logger, &channel.context);
+               log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
                        highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
                        remaining_in_flight);
                if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
@@ -5784,13 +6002,20 @@ where
        }
 
        fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels =
                        self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                       .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+               .ok_or_else(|| {
+                       let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id); 
+                       log_error!(logger, "{}", err_str);
+
+                       APIError::ChannelUnavailable { err: err_str } 
+               })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let is_only_peer_channel = peer_state.total_channel_count() == 1;
@@ -5805,9 +6030,19 @@ where
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
-                                       &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+                                       &self.logger, accept_0conf).map_err(|e| {
+                                               let err_str = e.to_string();
+                                               log_error!(logger, "{}", err_str);
+
+                                               APIError::ChannelUnavailable { err: err_str }
+                                       })
+                               }
+                       _ => { 
+                               let err_str = "No such channel awaiting to be accepted.".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               Err(APIError::APIMisuseError { err: err_str })
                        }
-                       _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
                }?;
 
                if accept_0conf {
@@ -5821,7 +6056,10 @@ where
                                }
                        };
                        peer_state.pending_msg_events.push(send_msg_err_event);
-                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                       let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+                       log_error!(logger, "{}", err_str);
+
+                       return Err(APIError::APIMisuseError { err: err_str });
                } else {
                        // If this peer already has some channels, a new channel won't increase our number of peers
                        // with unfunded channels, so as long as we aren't over the maximum number of unfunded
@@ -5834,7 +6072,10 @@ where
                                        }
                                };
                                peer_state.pending_msg_events.push(send_msg_err_event);
-                               return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+                               let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               return Err(APIError::APIMisuseError { err: err_str });
                        }
                }
 
@@ -5957,13 +6198,18 @@ where
 
                // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
                if self.default_configuration.manually_accept_inbound_channels {
+                       let channel_type = channel::channel_type_from_open_channel(
+                                       &msg, &peer_state.latest_features, &self.channel_type_features()
+                               ).map_err(|e|
+                                       MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+                               )?;
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push_back((events::Event::OpenChannelRequest {
                                temporary_channel_id: msg.temporary_channel_id.clone(),
                                counterparty_node_id: counterparty_node_id.clone(),
                                funding_satoshis: msg.funding_satoshis,
                                push_msat: msg.push_msat,
-                               channel_type: msg.channel_type.clone().unwrap(),
+                               channel_type,
                        }, None));
                        peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
                                open_channel_msg: msg.clone(),
@@ -6055,49 +6301,61 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let (chan, funding_msg_opt, monitor) =
+               let (mut chan, funding_msg_opt, monitor) =
                        match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
                                Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
-                                       match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
+                                       let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
+                                       match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
                                                Ok(res) => res,
-                                               Err((mut inbound_chan, err)) => {
+                                               Err((inbound_chan, err)) => {
                                                        // We've already removed this inbound channel from the map in `PeerState`
                                                        // above so at this point we just need to clean up any lingering entries
                                                        // concerning this channel as it is safe to do so.
-                                                       update_maps_on_chan_removal!(self, &inbound_chan.context);
-                                                       let user_id = inbound_chan.context.get_user_id();
-                                                       let shutdown_res = inbound_chan.context.force_shutdown(false);
-                                                       return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
-                                                               msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis()));
+                                                       debug_assert!(matches!(err, ChannelError::Close(_)));
+                                                       // Really we should be returning the channel_id the peer expects based
+                                                       // on their funding info here, but they're horribly confused anyway, so
+                                                       // there's not a lot we can do to save them.
+                                                       return Err(convert_chan_phase_err!(self, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
                                                },
                                        }
                                },
-                               Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+                               Some(mut phase) => {
+                                       let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
+                                       let err = ChannelError::Close(err_msg);
+                                       return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        };
 
-               match peer_state.channel_by_id.entry(chan.context.channel_id()) {
+               let funded_channel_id = chan.context.channel_id();
+
+               macro_rules! fail_chan { ($err: expr) => { {
+                       // Note that at this point we've filled in the funding outpoint on our
+                       // channel, but its actually in conflict with another channel. Thus, if
+                       // we call `convert_chan_phase_err` immediately (thus calling
+                       // `update_maps_on_chan_removal`), we'll remove the existing channel
+                       // from `outpoint_to_peer`. Thus, we must first unset the funding outpoint
+                       // on the channel.
+                       let err = ChannelError::Close($err.to_owned());
+                       chan.unset_funding_info(msg.temporary_channel_id);
+                       return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
+               } } }
+
+               match peer_state.channel_by_id.entry(funded_channel_id) {
                        hash_map::Entry::Occupied(_) => {
-                               Err(MsgHandleErrInternal::send_err_msg_no_close(
-                                       "Already had channel with the new channel_id".to_owned(),
-                                       chan.context.channel_id()
-                               ))
+                               fail_chan!("Already had channel with the new channel_id");
                        },
                        hash_map::Entry::Vacant(e) => {
-                               let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
-                               match id_to_peer_lock.entry(chan.context.channel_id()) {
+                               let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
+                               match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
                                        hash_map::Entry::Occupied(_) => {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close(
-                                                       "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
-                                                       chan.context.channel_id()))
+                                               fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
                                        },
                                        hash_map::Entry::Vacant(i_e) => {
                                                let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
                                                if let Ok(persist_state) = monitor_res {
                                                        i_e.insert(chan.context.get_counterparty_node_id());
-                                                       mem::drop(id_to_peer_lock);
+                                                       mem::drop(outpoint_to_peer_lock);
 
                                                        // There's no problem signing a counterparty's funding transaction if our monitor
                                                        // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
@@ -6118,14 +6376,9 @@ where
                                                        }
                                                        Ok(())
                                                } else {
-                                                       log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
-                                                       let channel_id = match funding_msg_opt {
-                                                               Some(msg) => msg.channel_id,
-                                                               None => chan.context.channel_id(),
-                                                       };
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
-                                                               "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
-                                                               channel_id));
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
+                                                       fail_chan!("Duplicate funding outpoint");
                                                }
                                        }
                                }
@@ -6145,21 +6398,43 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
-                               match chan_phase_entry.get_mut() {
-                                       ChannelPhase::Funded(ref mut chan) => {
-                                               let monitor = try_chan_phase_entry!(self,
-                                                       chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
-                                               if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
-                                                       handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
-                                                       Ok(())
-                                               } else {
-                                                       try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
+                       hash_map::Entry::Occupied(chan_phase_entry) => {
+                               if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
+                                       let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
+                                       let logger = WithContext::from(
+                                               &self.logger,
+                                               Some(chan.context.get_counterparty_node_id()),
+                                               Some(chan.context.channel_id())
+                                       );
+                                       let res =
+                                               chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
+                                       match res {
+                                               Ok((chan, monitor)) => {
+                                                       if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
+                                                               // We really should be able to insert here without doing a second
+                                                               // lookup, but sadly rust stdlib doesn't currently allow keeping
+                                                               // the original Entry around with the value removed.
+                                                               let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
+                                                               if let ChannelPhase::Funded(ref mut chan) = &mut chan {
+                                                                       handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
+                                                               } else { unreachable!(); }
+                                                               Ok(())
+                                                       } else {
+                                                               let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+                                                               return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
+                                                       }
+                                               },
+                                               Err((chan, e)) => {
+                                                       debug_assert!(matches!(e, ChannelError::Close(_)),
+                                                               "We don't have a channel anymore, so the error better have expected close");
+                                                       // We've already removed this outbound channel from the map in
+                                                       // `PeerState` above so at this point we just need to clean up any
+                                                       // lingering entries concerning this channel as it is safe to do so.
+                                                       return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
                                                }
-                                       },
-                                       _ => {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
-                                       },
+                                       }
+                               } else {
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
                                }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -6180,10 +6455,11 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
-                                               self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+                                               self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
                                        if let Some(announcement_sigs) = announcement_sigs_opt {
-                                               log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
+                                               log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                        node_id: counterparty_node_id.clone(),
                                                        msg: announcement_sigs,
@@ -6194,7 +6470,7 @@ where
                                                // counterparty's announcement_signatures. Thus, we only bother to send a
                                                // channel_update here if the channel is not public, i.e. we're not sending an
                                                // announcement_signatures.
-                                               log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
+                                               log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
                                                if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
                                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                node_id: counterparty_node_id.clone(),
@@ -6237,7 +6513,8 @@ where
                                match phase {
                                        ChannelPhase::Funded(chan) => {
                                                if !chan.received_shutdown() {
-                                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
                                                                msg.channel_id,
                                                                if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
                                                }
@@ -6264,7 +6541,8 @@ where
                                        },
                                        ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
                                                let context = phase.context_mut();
-                                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+                                               let logger = WithChannelContext::from(&self.logger, context);
+                                               log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
                                                self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
                                                finish_shutdown = Some(chan.context_mut().force_shutdown(false));
@@ -6324,7 +6602,8 @@ where
                        }
                };
                if let Some(broadcast_tx) = tx {
-                       log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
+                       let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
+                       log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id), "Broadcasting {}", log_tx!(broadcast_tx));
                        self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
                }
                if let Some(ChannelPhase::Funded(chan)) = chan_option {
@@ -6357,7 +6636,7 @@ where
                // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
                // closing a channel), so any changes are likely to be lost on restart!
 
-               let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
+               let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -6371,17 +6650,33 @@ where
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        let pending_forward_info = match decoded_hop_res {
                                                Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
-                                                       self.construct_pending_htlc_status(msg, shared_secret, next_hop,
-                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+                                                       self.construct_pending_htlc_status(
+                                                               msg, counterparty_node_id, shared_secret, next_hop,
+                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
+                                                       ),
                                                Err(e) => PendingHTLCStatus::Fail(e)
                                        };
                                        let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                                               if msg.blinding_point.is_some() {
+                                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+                                                                       msgs::UpdateFailMalformedHTLC {
+                                                                               channel_id: msg.channel_id,
+                                                                               htlc_id: msg.htlc_id,
+                                                                               sha256_of_onion: [0; 32],
+                                                                               failure_code: INVALID_ONION_BLINDING,
+                                                                       }
+                                                       ))
+                                               }
                                                // If the update_add is completely bogus, the call will Err and we will close,
                                                // but if we've sent a shutdown and they haven't acknowledged it yet, we just
                                                // want to reject the new HTLC and fail it backwards instead of forwarding.
                                                match pending_forward_info {
-                                                       PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                               let reason = if (error_code & 0x1000) != 0 {
+                                                       PendingHTLCStatus::Forward(PendingHTLCInfo {
+                                                               ref incoming_shared_secret, ref routing, ..
+                                                       }) => {
+                                                               let reason = if routing.blinded_failure().is_some() {
+                                                                       HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+                                                               } else if (error_code & 0x1000) != 0 {
                                                                        let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
                                                                        HTLCFailReason::reason(real_code, error_data)
                                                                } else {
@@ -6397,7 +6692,8 @@ where
                                                        _ => pending_forward_info
                                                }
                                        };
-                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan_phase_entry);
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
@@ -6424,7 +6720,8 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
                                                if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
-                                                       log_trace!(self.logger,
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       log_trace!(logger,
                                                                "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
                                                                msg.channel_id);
                                                        peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
@@ -6517,8 +6814,9 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        let funding_txo = chan.context.get_funding_txo();
-                                       let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
+                                       let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
                                        if let Some(monitor_update) = monitor_update_opt {
                                                handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
                                                        peer_state, per_peer_state, chan);
@@ -6575,7 +6873,8 @@ where
                                                                                        prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
                                                                        },
                                                                        hash_map::Entry::Occupied(_) => {
-                                                                               log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
+                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+                                                                               log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                        short_channel_id: prev_short_channel_id,
                                                                                        user_channel_id: Some(prev_user_channel_id),
@@ -6583,6 +6882,7 @@ where
                                                                                        htlc_id: prev_htlc_id,
                                                                                        incoming_packet_shared_secret: forward_info.incoming_shared_secret,
                                                                                        phantom_shared_secret: None,
+                                                                                       blinded_failure: forward_info.routing.blinded_failure(),
                                                                                });
 
                                                                                failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
@@ -6683,6 +6983,7 @@ where
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
                                                        self.raa_monitor_updates_held(
@@ -6690,7 +6991,7 @@ where
                                                                *counterparty_node_id)
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
-                                                       chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
+                                                       chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
                                                if let Some(monitor_update) = monitor_update_opt {
                                                        let funding_txo = funding_txo_opt
                                                                .expect("Funding outpoint must have been set for RAA handling to succeed");
@@ -6722,7 +7023,8 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                       try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &self.logger), chan_phase_entry);
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                       try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
@@ -6801,7 +7103,8 @@ where
                                        if were_node_one == msg_from_node_one {
                                                return Ok(NotifyOption::SkipPersistNoEvents);
                                        } else {
-                                               log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
                                                let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
                                                // If nothing changed after applying their update, we don't need to bother
                                                // persisting.
@@ -6832,6 +7135,7 @@ where
                                                msg.channel_id
                                        )
                                })?;
+                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -6842,7 +7146,7 @@ where
                                                // freed HTLCs to fail backwards. If in the future we no longer drop pending
                                                // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                                let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
-                                                       msg, &self.logger, &self.node_signer, self.chain_hash,
+                                                       msg, &&logger, &self.node_signer, self.chain_hash,
                                                        &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
                                                let mut channel_update = None;
                                                if let Some(msg) = responses.shutdown_msg {
@@ -6875,8 +7179,8 @@ where
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
-                                       log_debug!(self.logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
-                                               log_bytes!(msg.channel_id.0));
+                                       log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
+                                               msg.channel_id);
                                        // Unfortunately, lnd doesn't force close on errors
                                        // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
                                        // One of the few ways to get an lnd counterparty to force close is by
@@ -6933,11 +7237,12 @@ where
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
+                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
                                                if let Some(preimage) = htlc_update.payment_preimage {
-                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
+                                                       log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
                                                        self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
                                                } else {
-                                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
+                                                       log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
                                                        let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
                                                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
@@ -6948,9 +7253,9 @@ where
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
                                                                // TODO: Once we can rely on the counterparty_node_id from the
-                                                               // monitor event, this and the id_to_peer map should be removed.
-                                                               let id_to_peer = self.id_to_peer.lock().unwrap();
-                                                               id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
+                                                               // monitor event, this and the outpoint_to_peer map should be removed.
+                                                               let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+                                                               outpoint_to_peer.get(&funding_outpoint).cloned()
                                                        }
                                                };
                                                if let Some(counterparty_node_id) = counterparty_node_id_opt {
@@ -7025,7 +7330,7 @@ where
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
-                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger);
+                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context));
                                                if !holding_cell_failed_htlcs.is_empty() {
                                                        failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
                                                }
@@ -7058,36 +7363,40 @@ where
        /// attempted in every channel, or in the specifically provided channel.
        ///
        /// [`ChannelSigner`]: crate::sign::ChannelSigner
-       #[cfg(test)] // This is only implemented for one signer method, and should be private until we
-                    // actually finish implementing it fully.
+       #[cfg(async_signing)]
        pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| {
                        let node_id = phase.context().get_counterparty_node_id();
-                       if let ChannelPhase::Funded(chan) = phase {
-                               let msgs = chan.signer_maybe_unblocked(&self.logger);
-                               if let Some(updates) = msgs.commitment_update {
-                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id,
-                                               updates,
-                                       });
-                               }
-                               if let Some(msg) = msgs.funding_signed {
-                                       pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
-                                               node_id,
-                                               msg,
-                                       });
-                               }
-                               if let Some(msg) = msgs.funding_created {
-                                       pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-                                               node_id,
-                                               msg,
-                                       });
+                       match phase {
+                               ChannelPhase::Funded(chan) => {
+                                       let msgs = chan.signer_maybe_unblocked(&self.logger);
+                                       if let Some(updates) = msgs.commitment_update {
+                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id,
+                                                       updates,
+                                               });
+                                       }
+                                       if let Some(msg) = msgs.funding_signed {
+                                               pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+                                                       node_id,
+                                                       msg,
+                                               });
+                                       }
+                                       if let Some(msg) = msgs.channel_ready {
+                                               send_channel_ready!(self, pending_msg_events, chan, msg);
+                                       }
                                }
-                               if let Some(msg) = msgs.channel_ready {
-                                       send_channel_ready!(self, pending_msg_events, chan, msg);
+                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                       if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
+                                               pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+                                                       node_id,
+                                                       msg,
+                                               });
+                                       }
                                }
+                               ChannelPhase::UnfundedInboundV1(_) => {},
                        }
                };
 
@@ -7128,7 +7437,8 @@ where
                                peer_state.channel_by_id.retain(|channel_id, phase| {
                                        match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
                                                                Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
                                                                        if let Some(msg) = msg_opt {
                                                                                has_update = true;
@@ -7151,7 +7461,7 @@ where
 
                                                                                self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
 
-                                                                               log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+                                                                               log_info!(logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
                                                                                false
@@ -7214,32 +7524,43 @@ where
        ///
        /// # Privacy
        ///
-       /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
-       /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
-       /// the node must be announced. Otherwise, there is no way to find a path to the introduction
-       /// node in order to send the [`InvoiceRequest`].
+       /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
+       /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+       /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+       /// the node must be announced. Otherwise, there is no way to find a path to the introduction node in
+       /// order to send the [`InvoiceRequest`].
+       ///
+       /// Also, uses a derived signing pubkey in the offer for recipient privacy.
        ///
        /// # Limitations
        ///
        /// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
        /// reply path.
        ///
+       /// # Errors
+       ///
+       /// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
+       ///
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Offer`]: crate::offers::offer::Offer
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        pub fn create_offer_builder(
                &self, description: String
-       ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
+       ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
                let node_id = self.get_our_node_id();
                let expanded_key = &self.inbound_payment_key;
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
-               let path = self.create_one_hop_blinded_path();
 
-               OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
+               let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+               let builder = OfferBuilder::deriving_signing_pubkey(
+                       description, node_id, expanded_key, entropy, secp_ctx
+               )
                        .chain_hash(self.chain_hash)
-                       .path(path)
+                       .path(path);
+
+               Ok(builder)
        }
 
        /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
@@ -7264,10 +7585,13 @@ where
        ///
        /// # Privacy
        ///
-       /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
-       /// the introduction node and a derived payer id for payer privacy. As such, currently, the
-       /// node must be announced. Otherwise, there is no way to find a path to the introduction node
-       /// in order to send the [`Bolt12Invoice`].
+       /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
+       /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+       /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+       /// the node must be announced. Otherwise, there is no way to find a path to the introduction node in
+       /// order to send the [`Bolt12Invoice`].
+       ///
+       /// Also, uses a derived payer id in the refund for payer privacy.
        ///
        /// # Limitations
        ///
@@ -7276,14 +7600,17 @@ where
        ///
        /// # Errors
        ///
-       /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
-       /// or if `amount_msats` is invalid.
+       /// Errors if:
+       /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+       /// - `amount_msats` is invalid, or
+       /// - the parameterized [`Router`] is unable to create a blinded path for the refund.
        ///
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Refund`]: crate::offers::refund::Refund
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
+       /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
        pub fn create_refund_builder(
                &self, description: String, amount_msats: u64, absolute_expiry: Duration,
                payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
@@ -7292,8 +7619,8 @@ where
                let expanded_key = &self.inbound_payment_key;
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
-               let path = self.create_one_hop_blinded_path();
 
+               let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = RefundBuilder::deriving_payer_id(
                        description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
                )?
@@ -7351,8 +7678,11 @@ where
        ///
        /// # Errors
        ///
-       /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
-       /// or if the provided parameters are invalid for the offer.
+       /// Errors if:
+       /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+       /// - the provided parameters are invalid for the offer, or
+       /// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
+       ///   request.
        ///
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        /// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
@@ -7385,9 +7715,8 @@ where
                        None => builder,
                        Some(payer_note) => builder.payer_note(payer_note),
                };
-
                let invoice_request = builder.build_and_sign()?;
-               let reply_path = self.create_one_hop_blinded_path();
+               let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
 
                let expiration = StaleExpiration::TimerTicks(1);
                self.pending_outbound_payments
@@ -7436,6 +7765,11 @@ where
        /// node meeting the aforementioned criteria, but there's no guarantee that they will be
        /// received and no retries will be made.
        ///
+       /// # Errors
+       ///
+       /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
+       /// path for the invoice.
+       ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
                let expanded_key = &self.inbound_payment_key;
@@ -7447,9 +7781,9 @@ where
 
                match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
                        Ok((payment_hash, payment_secret)) => {
-                               let payment_paths = vec![
-                                       self.create_one_hop_blinded_payment_path(payment_secret),
-                               ];
+                               let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
+                                       .map_err(|_| Bolt12SemanticError::MissingPaths)?;
+
                                #[cfg(not(feature = "no-std"))]
                                let builder = refund.respond_using_derived_keys(
                                        payment_paths, payment_hash, expanded_key, entropy
@@ -7463,7 +7797,8 @@ where
                                        payment_paths, payment_hash, created_at, expanded_key, entropy
                                )?;
                                let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
-                               let reply_path = self.create_one_hop_blinded_path();
+                               let reply_path = self.create_blinded_path()
+                                       .map_err(|_| Bolt12SemanticError::MissingPaths)?;
 
                                let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
                                if refund.paths().is_empty() {
@@ -7590,24 +7925,37 @@ where
                inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
        }
 
-       /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
-       /// node.
-       fn create_one_hop_blinded_path(&self) -> BlindedPath {
+       /// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
+       ///
+       /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+       fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
+               let recipient = self.get_our_node_id();
                let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
-               BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
+
+               let peers = self.per_peer_state.read().unwrap()
+                       .iter()
+                       .filter(|(_, peer)| peer.lock().unwrap().latest_features.supports_onion_messages())
+                       .map(|(node_id, _)| *node_id)
+                       .collect::<Vec<_>>();
+
+               self.router
+                       .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+                       .and_then(|paths| paths.into_iter().next().ok_or(()))
        }
 
-       /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
-       /// node.
-       fn create_one_hop_blinded_payment_path(
-               &self, payment_secret: PaymentSecret
-       ) -> (BlindedPayInfo, BlindedPath) {
+       /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
+       /// [`Router::create_blinded_payment_paths`].
+       fn create_blinded_payment_paths(
+               &self, amount_msats: u64, payment_secret: PaymentSecret
+       ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
                let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
 
+               let first_hops = self.list_usable_channels();
                let payee_node_id = self.get_our_node_id();
-               let max_cltv_expiry = self.best_block.read().unwrap().height() + LATENCY_GRACE_PERIOD_BLOCKS;
+               let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
+                       + LATENCY_GRACE_PERIOD_BLOCKS;
                let payee_tlvs = ReceiveTlvs {
                        payment_secret,
                        payment_constraints: PaymentConstraints {
@@ -7615,10 +7963,9 @@ where
                                htlc_minimum_msat: 1,
                        },
                };
-               // TODO: Err for overflow?
-               BlindedPath::one_hop_for_payment(
-                       payee_node_id, payee_tlvs, entropy_source, secp_ctx
-               ).unwrap()
+               self.router.create_blinded_payment_paths(
+                       payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
+               )
        }
 
        /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
@@ -7724,12 +8071,14 @@ where
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
        fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+               let logger = WithContext::from(
+                       &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+               );
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
                                let mut peer_state_lck = peer_state_mtx.lock().unwrap();
                                let peer_state = &mut *peer_state_lck;
-
                                if let Some(blocker) = completed_blocker.take() {
                                        // Only do this on the first iteration of the loop.
                                        if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
@@ -7744,7 +8093,7 @@ where
                                        // Check that, while holding the peer lock, we don't have anything else
                                        // blocking monitor updates for this channel. If we do, release the monitor
                                        // update(s) when those blockers complete.
-                                       log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
+                                       log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
                                                &channel_funding_outpoint.to_channel_id());
                                        break;
                                }
@@ -7753,7 +8102,7 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
-                                                       log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+                                                       log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                                channel_funding_outpoint.to_channel_id());
                                                        handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                                peer_state_lck, peer_state, per_peer_state, chan);
@@ -7763,13 +8112,13 @@ where
                                                                continue;
                                                        }
                                                } else {
-                                                       log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+                                                       log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
                                                                channel_funding_outpoint.to_channel_id());
                                                }
                                        }
                                }
                        } else {
-                               log_debug!(self.logger,
+                               log_debug!(logger,
                                        "Got a release post-RAA monitor update for peer {} but the channel is gone",
                                        log_pubkey!(counterparty_node_id));
                        }
@@ -7801,349 +8150,9 @@ where
        }
 }
 
-fn create_fwd_pending_htlc_info(
-       msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32],
-       new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32],
-       next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
-) -> Result<PendingHTLCInfo, InboundOnionErr> {
-       debug_assert!(next_packet_pubkey_opt.is_some());
-       let outgoing_packet = msgs::OnionPacket {
-               version: 0,
-               public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
-               hop_data: new_packet_bytes,
-               hmac: hop_hmac,
-       };
-
-       let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data {
-               msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
-                       (short_channel_id, amt_to_forward, outgoing_cltv_value),
-               msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
-                       return Err(InboundOnionErr {
-                               msg: "Final Node OnionHopData provided for us as an intermediary node",
-                               err_code: 0x4000 | 22,
-                               err_data: Vec::new(),
-                       }),
-       };
-
-       Ok(PendingHTLCInfo {
-               routing: PendingHTLCRouting::Forward {
-                       onion_packet: outgoing_packet,
-                       short_channel_id,
-               },
-               payment_hash: msg.payment_hash,
-               incoming_shared_secret: shared_secret,
-               incoming_amt_msat: Some(msg.amount_msat),
-               outgoing_amt_msat: amt_to_forward,
-               outgoing_cltv_value,
-               skimmed_fee_msat: None,
-       })
-}
-
-fn create_recv_pending_htlc_info(
-       hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash,
-       amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
-       counterparty_skimmed_fee_msat: Option<u64>, current_height: u32, accept_mpp_keysend: bool,
-) -> Result<PendingHTLCInfo, InboundOnionErr> {
-       let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data {
-               msgs::InboundOnionPayload::Receive {
-                       payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
-               } =>
-                       (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
-               msgs::InboundOnionPayload::BlindedReceive {
-                       amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
-               } => {
-                       let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
-                       (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
-               }
-               msgs::InboundOnionPayload::Forward { .. } => {
-                       return Err(InboundOnionErr {
-                               err_code: 0x4000|22,
-                               err_data: Vec::new(),
-                               msg: "Got non final data with an HMAC of 0",
-                       })
-               },
-       };
-       // final_incorrect_cltv_expiry
-       if outgoing_cltv_value > cltv_expiry {
-               return Err(InboundOnionErr {
-                       msg: "Upstream node set CLTV to less than the CLTV set by the sender",
-                       err_code: 18,
-                       err_data: cltv_expiry.to_be_bytes().to_vec()
-               })
-       }
-       // final_expiry_too_soon
-       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
-       // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
-       //
-       // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
-       // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
-       // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
-       if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
-               let mut err_data = Vec::with_capacity(12);
-               err_data.extend_from_slice(&amt_msat.to_be_bytes());
-               err_data.extend_from_slice(&current_height.to_be_bytes());
-               return Err(InboundOnionErr {
-                       err_code: 0x4000 | 15, err_data,
-                       msg: "The final CLTV expiry is too soon to handle",
-               });
-       }
-       if (!allow_underpay && onion_amt_msat > amt_msat) ||
-               (allow_underpay && onion_amt_msat >
-                amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
-       {
-               return Err(InboundOnionErr {
-                       err_code: 19,
-                       err_data: amt_msat.to_be_bytes().to_vec(),
-                       msg: "Upstream node sent less than we were supposed to receive in payment",
-               });
-       }
-
-       let routing = if let Some(payment_preimage) = keysend_preimage {
-               // We need to check that the sender knows the keysend preimage before processing this
-               // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
-               // could discover the final destination of X, by probing the adjacent nodes on the route
-               // with a keysend payment of identical payment hash to X and observing the processing
-               // time discrepancies due to a hash collision with X.
-               let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
-               if hashed_preimage != payment_hash {
-                       return Err(InboundOnionErr {
-                               err_code: 0x4000|22,
-                               err_data: Vec::new(),
-                               msg: "Payment preimage didn't match payment hash",
-                       });
-               }
-               if !accept_mpp_keysend && payment_data.is_some() {
-                       return Err(InboundOnionErr {
-                               err_code: 0x4000|22,
-                               err_data: Vec::new(),
-                               msg: "We don't support MPP keysend payments",
-                       });
-               }
-               PendingHTLCRouting::ReceiveKeysend {
-                       payment_data,
-                       payment_preimage,
-                       payment_metadata,
-                       incoming_cltv_expiry: outgoing_cltv_value,
-                       custom_tlvs,
-               }
-       } else if let Some(data) = payment_data {
-               PendingHTLCRouting::Receive {
-                       payment_data: data,
-                       payment_metadata,
-                       incoming_cltv_expiry: outgoing_cltv_value,
-                       phantom_shared_secret,
-                       custom_tlvs,
-               }
-       } else {
-               return Err(InboundOnionErr {
-                       err_code: 0x4000|0x2000|3,
-                       err_data: Vec::new(),
-                       msg: "We require payment_secrets",
-               });
-       };
-       Ok(PendingHTLCInfo {
-               routing,
-               payment_hash,
-               incoming_shared_secret: shared_secret,
-               incoming_amt_msat: Some(amt_msat),
-               outgoing_amt_msat: onion_amt_msat,
-               outgoing_cltv_value,
-               skimmed_fee_msat: counterparty_skimmed_fee_msat,
-       })
-}
-
-/// Peel one layer off an incoming onion, returning [`PendingHTLCInfo`] (either Forward or Receive).
-/// This does all the relevant context-free checks that LDK requires for payment relay or
-/// acceptance. If the payment is to be received, and the amount matches the expected amount for
-/// a given invoice, this indicates the [`msgs::UpdateAddHTLC`], once fully committed in the
-/// channel, will generate an [`Event::PaymentClaimable`].
-pub fn peel_payment_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
-       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
-       cur_height: u32, accept_mpp_keysend: bool,
-) -> Result<PendingHTLCInfo, InboundOnionErr>
-where
-       NS::Target: NodeSigner,
-       L::Target: Logger,
-{
-       let (hop, shared_secret, next_packet_details_opt) =
-               decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx
-       ).map_err(|e| {
-               let (err_code, err_data) = match e {
-                       HTLCFailureMsg::Malformed(m) => (m.failure_code, Vec::new()),
-                       HTLCFailureMsg::Relay(r) => (0x4000 | 22, r.reason.data),
-               };
-               let msg = "Failed to decode update add htlc onion";
-               InboundOnionErr { msg, err_code, err_data }
-       })?;
-       Ok(match hop {
-               onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
-                       let NextPacketDetails {
-                               next_packet_pubkey, outgoing_amt_msat: _, outgoing_scid: _, outgoing_cltv_value
-                       } = match next_packet_details_opt {
-                               Some(next_packet_details) => next_packet_details,
-                               // Forward should always include the next hop details
-                               None => return Err(InboundOnionErr {
-                                       msg: "Failed to decode update add htlc onion",
-                                       err_code: 0x4000 | 22,
-                                       err_data: Vec::new(),
-                               }),
-                       };
-
-                       if let Err((err_msg, code)) = check_incoming_htlc_cltv(
-                               cur_height, outgoing_cltv_value, msg.cltv_expiry
-                       ) {
-                               return Err(InboundOnionErr {
-                                       msg: err_msg,
-                                       err_code: code,
-                                       err_data: Vec::new(),
-                               });
-                       }
-                       create_fwd_pending_htlc_info(
-                               msg, next_hop_data, next_hop_hmac, new_packet_bytes, shared_secret,
-                               Some(next_packet_pubkey)
-                       )?
-               },
-               onion_utils::Hop::Receive(received_data) => {
-                       create_recv_pending_htlc_info(
-                               received_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry,
-                               None, false, msg.skimmed_fee_msat, cur_height, accept_mpp_keysend,
-                       )?
-               }
-       })
-}
-
-struct NextPacketDetails {
-       next_packet_pubkey: Result<PublicKey, secp256k1::Error>,
-       outgoing_scid: u64,
-       outgoing_amt_msat: u64,
-       outgoing_cltv_value: u32,
-}
-
-fn decode_incoming_update_add_htlc_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
-       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
-) -> Result<(onion_utils::Hop, [u8; 32], Option<NextPacketDetails>), HTLCFailureMsg>
-where
-       NS::Target: NodeSigner,
-       L::Target: Logger,
-{
-       macro_rules! return_malformed_err {
-               ($msg: expr, $err_code: expr) => {
-                       {
-                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                               return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
-                                       channel_id: msg.channel_id,
-                                       htlc_id: msg.htlc_id,
-                                       sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(),
-                                       failure_code: $err_code,
-                               }));
-                       }
-               }
-       }
-
-       if let Err(_) = msg.onion_routing_packet.public_key {
-               return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
-       }
-
-       let shared_secret = node_signer.ecdh(
-               Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None
-       ).unwrap().secret_bytes();
-
-       if msg.onion_routing_packet.version != 0 {
-               //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
-               //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
-               //the hash doesn't really serve any purpose - in the case of hashing all data, the
-               //receiving node would have to brute force to figure out which version was put in the
-               //packet by the node that send us the message, in the case of hashing the hop_data, the
-               //node knows the HMAC matched, so they already know what is there...
-               return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
-       }
-       macro_rules! return_err {
-               ($msg: expr, $err_code: expr, $data: expr) => {
-                       {
-                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                               return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
-                                       channel_id: msg.channel_id,
-                                       htlc_id: msg.htlc_id,
-                                       reason: HTLCFailReason::reason($err_code, $data.to_vec())
-                                               .get_encrypted_failure_packet(&shared_secret, &None),
-                               }));
-                       }
-               }
-       }
-
-       let next_hop = match onion_utils::decode_next_payment_hop(
-               shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
-               msg.payment_hash, node_signer
-       ) {
-               Ok(res) => res,
-               Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
-                       return_malformed_err!(err_msg, err_code);
-               },
-               Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
-                       return_err!(err_msg, err_code, &[0; 0]);
-               },
-       };
-
-       let next_packet_details = match next_hop {
-               onion_utils::Hop::Forward {
-                       next_hop_data: msgs::InboundOnionPayload::Forward {
-                               short_channel_id, amt_to_forward, outgoing_cltv_value
-                       }, ..
-               } => {
-                       let next_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx,
-                               msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
-                       NextPacketDetails {
-                               next_packet_pubkey, outgoing_scid: short_channel_id,
-                               outgoing_amt_msat: amt_to_forward, outgoing_cltv_value
-                       }
-               },
-               onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
-               onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
-                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
-               {
-                       return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
-               }
-       };
-
-       Ok((next_hop, shared_secret, Some(next_packet_details)))
-}
-
-fn check_incoming_htlc_cltv(
-       cur_height: u32, outgoing_cltv_value: u32, cltv_expiry: u32
-) -> Result<(), (&'static str, u16)> {
-       if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
-               return Err((
-                       "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
-                       0x1000 | 13, // incorrect_cltv_expiry
-               ));
-       }
-       // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
-       // but we want to be robust wrt to counterparty packet sanitization (see
-       // HTLC_FAIL_BACK_BUFFER rationale).
-       if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-               return Err(("CLTV expiry is too close", 0x1000 | 14));
-       }
-       if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
-               return Err(("CLTV expiry is too far in the future", 21));
-       }
-       // If the HTLC expires ~now, don't bother trying to forward it to our
-       // counterparty. They should fail it anyway, but we don't want to bother with
-       // the round-trips or risk them deciding they definitely want the HTLC and
-       // force-closing to ensure they get it if we're offline.
-       // We previously had a much more aggressive check here which tried to ensure
-       // our counterparty receives an HTLC which has *our* risk threshold met on it,
-       // but there is no need to do that, and since we're a bit conservative with our
-       // risk threshold it just results in failing to forward payments.
-       if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
-               return Err(("Outgoing CLTV value is too soon", 0x1000 | 14));
-       }
-
-       Ok(())
-}
-
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -8205,7 +8214,7 @@ where
 
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -8226,7 +8235,7 @@ where
 
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -8262,13 +8271,13 @@ where
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
 
-               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
        }
 }
 
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -8288,13 +8297,13 @@ where
                let _persistence_guard =
                        PersistenceNotifierGuard::optionally_notify_skipping_background_events(
                                self, || -> NotifyOption { NotifyOption::DoPersist });
-               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger)
+               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
                        .map(|(a, b)| (a, Vec::new(), b)));
 
                let last_best_block_height = self.best_block.read().unwrap().height();
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
-                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
                }
        }
 
@@ -8311,7 +8320,7 @@ where
                                self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
-               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &self.logger));
+               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
 
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@ -8360,7 +8369,7 @@ where
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.context.get_funding_txo() {
                                if funding_txo.txid == *txid {
-                                       channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
+                                       channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context)).map(|()| (None, Vec::new(), None))
                                } else { Ok((None, Vec::new(), None)) }
                        } else { Ok((None, Vec::new(), None)) }
                });
@@ -8369,7 +8378,7 @@ where
 
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -8407,10 +8416,11 @@ where
                                                                        timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
                                                                                HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
                                                                }
+                                                               let logger = WithChannelContext::from(&self.logger, &channel.context);
                                                                if let Some(channel_ready) = channel_ready_opt {
                                                                        send_channel_ready!(self, pending_msg_events, channel, channel_ready);
                                                                        if channel.context.is_usable() {
-                                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
+                                                                               log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
                                                                                if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                                                node_id: channel.context.get_counterparty_node_id(),
@@ -8418,7 +8428,7 @@ where
                                                                                        });
                                                                                }
                                                                        } else {
-                                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
+                                                                               log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
                                                                        }
                                                                }
 
@@ -8428,7 +8438,7 @@ where
                                                                }
 
                                                                if let Some(announcement_sigs) = announcement_sigs {
-                                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
+                                                                       log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
                                                                        pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                                                node_id: channel.context.get_counterparty_node_id(),
                                                                                msg: announcement_sigs,
@@ -8519,6 +8529,7 @@ where
                                                incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
                                                phantom_shared_secret: None,
                                                outpoint: htlc.prev_funding_outpoint,
+                                               blinded_failure: htlc.forward_info.routing.blinded_failure(),
                                        });
 
                                        let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
@@ -8528,7 +8539,10 @@ where
                                        timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
                                                        HTLCFailReason::from_failure_code(0x2000 | 2),
                                                        HTLCDestination::InvalidForward { requested_forward_scid }));
-                                       log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
+                                       let logger = WithContext::from(
+                                               &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+                                       );
+                                       log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
                                        false
                                } else { true }
                        });
@@ -8613,7 +8627,7 @@ where
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
        ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -8835,8 +8849,11 @@ where
                let mut failed_channels = Vec::new();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
-                       log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
-                               log_pubkey!(counterparty_node_id));
+                       log_debug!(
+                               WithContext::from(&self.logger, Some(*counterparty_node_id), None),
+                               "Marking channels with {} disconnected and generating channel_updates.",
+                               log_pubkey!(counterparty_node_id)
+                       );
                        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
@@ -8844,7 +8861,8 @@ where
                                peer_state.channel_by_id.retain(|_, phase| {
                                        let context = match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() {
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
                                                                // We only retain funded channels that are not shutdown.
                                                                return true;
                                                        }
@@ -8931,8 +8949,9 @@ where
        }
 
        fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
                if !init_msg.features.supports_static_remote_key() {
-                       log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
+                       log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
                        return Err(());
                }
 
@@ -8984,7 +9003,7 @@ where
                                }
                        }
 
-                       log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+                       log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
@@ -8993,17 +9012,12 @@ where
                                let pending_msg_events = &mut peer_state.pending_msg_events;
 
                                peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
-                                               // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-                                               // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
-                                               // worry about closing and removing them.
-                                               debug_assert!(false);
-                                               None
-                                       }
+                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
                                ).for_each(|chan| {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
                                                node_id: chan.context.get_counterparty_node_id(),
-                                               msg: chan.get_channel_reestablish(&self.logger),
+                                               msg: chan.get_channel_reestablish(&&logger),
                                        });
                                });
                        }
@@ -9167,7 +9181,7 @@ where
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
 OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -9185,7 +9199,7 @@ where
                                let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
                                        &invoice_request
                                ) {
-                                       Ok(amount_msats) => Some(amount_msats),
+                                       Ok(amount_msats) => amount_msats,
                                        Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
                                };
                                let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
@@ -9195,64 +9209,69 @@ where
                                                return Some(OffersMessage::InvoiceError(error.into()));
                                        },
                                };
-                               let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
 
-                               match self.create_inbound_payment(amount_msats, relative_expiry, None) {
-                                       Ok((payment_hash, payment_secret)) if invoice_request.keys.is_some() => {
-                                               let payment_paths = vec![
-                                                       self.create_one_hop_blinded_payment_path(payment_secret),
-                                               ];
-                                               #[cfg(not(feature = "no-std"))]
-                                               let builder = invoice_request.respond_using_derived_keys(
-                                                       payment_paths, payment_hash
-                                               );
-                                               #[cfg(feature = "no-std")]
-                                               let created_at = Duration::from_secs(
-                                                       self.highest_seen_timestamp.load(Ordering::Acquire) as u64
-                                               );
-                                               #[cfg(feature = "no-std")]
-                                               let builder = invoice_request.respond_using_derived_keys_no_std(
-                                                       payment_paths, payment_hash, created_at
-                                               );
-                                               match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
-                                                       Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
-                                                       Err(error) => Some(OffersMessage::InvoiceError(error.into())),
-                                               }
-                                       },
-                                       Ok((payment_hash, payment_secret)) => {
-                                               let payment_paths = vec![
-                                                       self.create_one_hop_blinded_payment_path(payment_secret),
-                                               ];
-                                               #[cfg(not(feature = "no-std"))]
-                                               let builder = invoice_request.respond_with(payment_paths, payment_hash);
-                                               #[cfg(feature = "no-std")]
-                                               let created_at = Duration::from_secs(
-                                                       self.highest_seen_timestamp.load(Ordering::Acquire) as u64
-                                               );
-                                               #[cfg(feature = "no-std")]
-                                               let builder = invoice_request.respond_with_no_std(
-                                                       payment_paths, payment_hash, created_at
-                                               );
-                                               let response = builder.and_then(|builder| builder.allow_mpp().build())
-                                                       .map_err(|e| OffersMessage::InvoiceError(e.into()))
-                                                       .and_then(|invoice|
-                                                               match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
-                                                                       Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
-                                                                       Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
-                                                                                       InvoiceError::from_string("Failed signing invoice".to_string())
-                                                                       )),
-                                                                       Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
-                                                                                       InvoiceError::from_string("Failed invoice signature verification".to_string())
-                                                                       )),
-                                                               });
-                                               match response {
-                                                       Ok(invoice) => Some(invoice),
-                                                       Err(error) => Some(error),
-                                               }
+                               let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+                               let (payment_hash, payment_secret) = match self.create_inbound_payment(
+                                       Some(amount_msats), relative_expiry, None
+                               ) {
+                                       Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
+                                       Err(()) => {
+                                               let error = Bolt12SemanticError::InvalidAmount;
+                                               return Some(OffersMessage::InvoiceError(error.into()));
                                        },
+                               };
+
+                               let payment_paths = match self.create_blinded_payment_paths(
+                                       amount_msats, payment_secret
+                               ) {
+                                       Ok(payment_paths) => payment_paths,
                                        Err(()) => {
-                                               Some(OffersMessage::InvoiceError(Bolt12SemanticError::InvalidAmount.into()))
+                                               let error = Bolt12SemanticError::MissingPaths;
+                                               return Some(OffersMessage::InvoiceError(error.into()));
                                        },
+                               };
+
+                               #[cfg(feature = "no-std")]
+                               let created_at = Duration::from_secs(
+                                       self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+                               );
+
+                               if invoice_request.keys.is_some() {
+                                       #[cfg(not(feature = "no-std"))]
+                                       let builder = invoice_request.respond_using_derived_keys(
+                                               payment_paths, payment_hash
+                                       );
+                                       #[cfg(feature = "no-std")]
+                                       let builder = invoice_request.respond_using_derived_keys_no_std(
+                                               payment_paths, payment_hash, created_at
+                                       );
+                                       match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
+                                               Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
+                                               Err(error) => Some(OffersMessage::InvoiceError(error.into())),
+                                       }
+                               } else {
+                                       #[cfg(not(feature = "no-std"))]
+                                       let builder = invoice_request.respond_with(payment_paths, payment_hash);
+                                       #[cfg(feature = "no-std")]
+                                       let builder = invoice_request.respond_with_no_std(
+                                               payment_paths, payment_hash, created_at
+                                       );
+                                       let response = builder.and_then(|builder| builder.allow_mpp().build())
+                                               .map_err(|e| OffersMessage::InvoiceError(e.into()))
+                                               .and_then(|invoice|
+                                                       match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
+                                                               Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
+                                                               Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
+                                                                               InvoiceError::from_string("Failed signing invoice".to_string())
+                                                               )),
+                                                               Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
+                                                                               InvoiceError::from_string("Failed invoice signature verification".to_string())
+                                                               )),
+                                                       });
+                                       match response {
+                                               Ok(invoice) => Some(invoice),
+                                               Err(error) => Some(error),
+                                       }
                                }
                        },
                        OffersMessage::Invoice(invoice) => {
@@ -9482,9 +9501,14 @@ impl_writeable_tlv_based!(PhantomRouteHints, {
        (6, real_node_pubkey, required),
 });
 
+impl_writeable_tlv_based!(BlindedForward, {
+       (0, inbound_blinding_point, required),
+});
+
 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
        (0, Forward) => {
                (0, onion_packet, required),
+               (1, blinded, option),
                (2, short_channel_id, required),
        },
        (1, Receive) => {
@@ -9493,6 +9517,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
                (2, incoming_cltv_expiry, required),
                (3, payment_metadata, option),
                (5, custom_tlvs, optional_vec),
+               (7, requires_blinded_error, (default_value, false)),
        },
        (2, ReceiveKeysend) => {
                (0, payment_preimage, required),
@@ -9586,10 +9611,16 @@ impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
        (1, Fail),
 );
 
+impl_writeable_tlv_based_enum!(BlindedFailure,
+       (0, FromIntroductionNode) => {},
+       (2, FromBlindedNode) => {}, ;
+);
+
 impl_writeable_tlv_based!(HTLCPreviousHopData, {
        (0, short_channel_id, required),
        (1, phantom_shared_secret, option),
        (2, outpoint, required),
+       (3, blinded_failure, option),
        (4, htlc_id, required),
        (6, incoming_packet_shared_secret, required),
        (7, user_channel_id, option),
@@ -9746,13 +9777,68 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
        (6, prev_funding_outpoint, required),
 });
 
-impl_writeable_tlv_based_enum!(HTLCForwardInfo,
-       (1, FailHTLC) => {
-               (0, htlc_id, required),
-               (2, err_packet, required),
-       };
-       (0, AddHTLC)
-);
+impl Writeable for HTLCForwardInfo {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               const FAIL_HTLC_VARIANT_ID: u8 = 1;
+               match self {
+                       Self::AddHTLC(info) => {
+                               0u8.write(w)?;
+                               info.write(w)?;
+                       },
+                       Self::FailHTLC { htlc_id, err_packet } => {
+                               FAIL_HTLC_VARIANT_ID.write(w)?;
+                               write_tlv_fields!(w, {
+                                       (0, htlc_id, required),
+                                       (2, err_packet, required),
+                               });
+                       },
+                       Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+                               // Since this variant was added in 0.0.119, write this as `::FailHTLC` with an empty error
+                               // packet so older versions have something to fail back with, but serialize the real data as
+                               // optional TLVs for the benefit of newer versions.
+                               FAIL_HTLC_VARIANT_ID.write(w)?;
+                               let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+                               write_tlv_fields!(w, {
+                                       (0, htlc_id, required),
+                                       (1, failure_code, required),
+                                       (2, dummy_err_packet, required),
+                                       (3, sha256_of_onion, required),
+                               });
+                       },
+               }
+               Ok(())
+       }
+}
+
+impl Readable for HTLCForwardInfo {
+       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let id: u8 = Readable::read(r)?;
+               Ok(match id {
+                       0 => Self::AddHTLC(Readable::read(r)?),
+                       1 => {
+                               _init_and_read_len_prefixed_tlv_fields!(r, {
+                                       (0, htlc_id, required),
+                                       (1, malformed_htlc_failure_code, option),
+                                       (2, err_packet, required),
+                                       (3, sha256_of_onion, option),
+                               });
+                               if let Some(failure_code) = malformed_htlc_failure_code {
+                                       Self::FailMalformedHTLC {
+                                               htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+                                               failure_code,
+                                               sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
+                                       }
+                               } else {
+                                       Self::FailHTLC {
+                                               htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+                                               err_packet: _init_tlv_based_struct_field!(err_packet, required),
+                                       }
+                               }
+                       },
+                       _ => return Err(DecodeError::InvalidValue),
+               })
+       }
+}
 
 impl_writeable_tlv_based!(PendingInboundPayment, {
        (0, payment_secret, required),
@@ -9764,7 +9850,7 @@ impl_writeable_tlv_based!(PendingInboundPayment, {
 
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -10068,7 +10154,7 @@ impl_writeable_tlv_based_enum!(ChannelShutdownState,
 /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
 pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -10127,13 +10213,13 @@ where
        /// this struct.
        ///
        /// This is not exported to bindings users because we have no HashMap bindings
-       pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>,
+       pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
 }
 
 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
                ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -10146,7 +10232,7 @@ where
        /// HashMap for you. This is primarily useful for C bindings where it is not practical to
        /// populate a HashMap directly from C.
        pub fn new(entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
-                       mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>) -> Self {
+                       mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
                Self {
                        entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
                        channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
@@ -10159,7 +10245,7 @@ where
 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
        ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, L>>)
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -10177,7 +10263,7 @@ where
 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
        ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, L>)
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: BroadcasterInterface,
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
@@ -10198,7 +10284,7 @@ where
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
                let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
                let mut close_background_events = Vec::new();
@@ -10206,6 +10292,7 @@ where
                        let mut channel: Channel<SP> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
+                       let logger = WithChannelContext::from(&args.logger, &channel.context);
                        let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -10214,22 +10301,22 @@ where
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
                                                channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                        // But if the channel is behind of the monitor, close the channel:
-                                       log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
-                                       log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+                                       log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+                                       log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                        &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
                                        }
                                        if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
                                        }
                                        if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
                                        }
                                        if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
                                        let mut shutdown_result = channel.context.force_shutdown(true);
@@ -10262,21 +10349,21 @@ where
                                                        // claim update ChannelMonitor updates were persisted prior to persisting
                                                        // the ChannelMonitor update for the forward leg, so attempting to fail the
                                                        // backwards leg of the HTLC will simply be rejected.
-                                                       log_info!(args.logger,
+                                                       log_info!(logger,
                                                                "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
                                                                &channel.context.channel_id(), &payment_hash);
                                                        failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                                }
                                        }
                                } else {
-                                       log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+                                       log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
                                                &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
                                                monitor.get_latest_update_id());
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
-                                       if channel.context.is_funding_broadcast() {
-                                               id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
+                                       if let Some(funding_txo) = channel.context.get_funding_txo() {
+                                               outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
                                        }
                                        match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
                                                hash_map::Entry::Occupied(mut entry) => {
@@ -10303,21 +10390,23 @@ where
                                        channel_capacity_sats: Some(channel.context.get_value_satoshis()),
                                }, None));
                        } else {
-                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
-                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                               log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
-                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                               log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
+                               log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                               log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                               log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+                               log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
                                return Err(DecodeError::InvalidValue);
                        }
                }
 
-               for (funding_txo, _) in args.channel_monitors.iter() {
+               for (funding_txo, monitor) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
-                               log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
+                               let logger = WithChannelMonitor::from(&args.logger, monitor);
+                               log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
                                        &funding_txo.to_channel_id());
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
+                                       counterparty_node_id: None,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                                };
                                close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
@@ -10491,12 +10580,12 @@ where
                let mut pending_background_events = Vec::new();
                macro_rules! handle_in_flight_updates {
                        ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
-                        $monitor: expr, $peer_state: expr, $channel_info_log: expr
+                        $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
                        ) => { {
                                let mut max_in_flight_update_id = 0;
                                $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
                                for update in $chan_in_flight_upds.iter() {
-                                       log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
+                                       log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
                                                update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
                                        pending_background_events.push(
@@ -10517,7 +10606,7 @@ where
                                                });
                                }
                                if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
-                                       log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
+                                       log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
                                        return Err(DecodeError::InvalidValue);
                                }
                                max_in_flight_update_id
@@ -10529,6 +10618,8 @@ where
                        let peer_state = &mut *peer_state_lock;
                        for phase in peer_state.channel_by_id.values() {
                                if let ChannelPhase::Funded(chan) = phase {
+                                       let logger = WithChannelContext::from(&args.logger, &chan.context);
+
                                        // Channels that were persisted have to be funded, otherwise they should have been
                                        // discarded.
                                        let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
@@ -10539,19 +10630,19 @@ where
                                                if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
                                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id,
                                                                handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
-                                                                       funding_txo, monitor, peer_state, ""));
+                                                                       funding_txo, monitor, peer_state, logger, ""));
                                                }
                                        }
                                        if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
                                                // If the channel is ahead of the monitor, return InvalidValue:
-                                               log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+                                               log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
                                                        chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
-                                               log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
-                                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                                               log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-                                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                               log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+                                               log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                               log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                               log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                               log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
                                                return Err(DecodeError::InvalidValue);
                                        }
                                } else {
@@ -10565,6 +10656,7 @@ where
 
                if let Some(in_flight_upds) = in_flight_monitor_updates {
                        for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
+                               let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
                                if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
                                        // Now that we've removed all the in-flight monitor updates for channels that are
                                        // still open, we need to replay any monitor updates that are for closed channels,
@@ -10574,15 +10666,15 @@ where
                                        });
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
-                                               funding_txo, monitor, peer_state, "closed ");
+                                               funding_txo, monitor, peer_state, logger, "closed ");
                                } else {
-                                       log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
-                                       log_error!(args.logger, " The ChannelMonitor for channel {} is missing.",
+                                       log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
+                                       log_error!(logger, " The ChannelMonitor for channel {} is missing.",
                                                &funding_txo.to_channel_id());
-                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                       log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                       log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                       log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
                                        return Err(DecodeError::InvalidValue);
                                }
                        }
@@ -10606,12 +10698,13 @@ where
                        // We only rebuild the pending payments map if we were most recently serialized by
                        // 0.0.102+
                        for (_, monitor) in args.channel_monitors.iter() {
-                               let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+                               let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
                                if counterparty_opt.is_none() {
+                                       let logger = WithChannelMonitor::from(&args.logger, monitor);
                                        for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
                                                        if path.hops.is_empty() {
-                                                               log_error!(args.logger, "Got an empty path for a pending payment");
+                                                               log_error!(logger, "Got an empty path for a pending payment");
                                                                return Err(DecodeError::InvalidValue);
                                                        }
 
@@ -10621,8 +10714,8 @@ where
                                                        match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
                                                                hash_map::Entry::Occupied(mut entry) => {
                                                                        let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
-                                                                       log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
-                                                                               if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), &htlc.payment_hash);
+                                                                       log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
+                                                                               if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), htlc.payment_hash);
                                                                },
                                                                hash_map::Entry::Vacant(entry) => {
                                                                        let path_fee = path.fee_msat();
@@ -10642,7 +10735,7 @@ where
                                                                                starting_block_height: best_block_height,
                                                                                remaining_max_total_routing_fee_msat: None, // only used for retries, and we'll never retry on startup
                                                                        });
-                                                                       log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
+                                                                       log_info!(logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
                                                                                path_amt, &htlc.payment_hash,  log_bytes!(session_priv_bytes));
                                                                }
                                                        }
@@ -10664,7 +10757,7 @@ where
                                                                        forwards.retain(|forward| {
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                                                                        if pending_forward_matches_htlc(&htlc_info) {
-                                                                                               log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+                                                                                               log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
                                                                                                        &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                                false
                                                                                        } else { true }
@@ -10674,7 +10767,7 @@ where
                                                                });
                                                                pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                                                                        if pending_forward_matches_htlc(&htlc_info) {
-                                                                               log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+                                                                               log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
                                                                                        &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                pending_events_read.retain(|(event, _)| {
                                                                                        if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
@@ -10702,7 +10795,7 @@ where
                                                                                        counterparty_node_id: path.hops[0].pubkey,
                                                                                };
                                                                        pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
-                                                                               path, false, compl_action, &pending_events, &args.logger);
+                                                                               path, false, compl_action, &pending_events, &&logger);
                                                                        pending_events_read = pending_events.into_inner().unwrap();
                                                                }
                                                        },
@@ -10833,6 +10926,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
                                if let ChannelPhase::Funded(chan) = phase {
+                                       let logger = WithChannelContext::from(&args.logger, &chan.context);
                                        if chan.context.outbound_scid_alias() == 0 {
                                                let mut outbound_scid_alias;
                                                loop {
@@ -10844,14 +10938,14 @@ where
                                        } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
                                                // Note that in rare cases its possible to hit this while reading an older
                                                // channel if we just happened to pick a colliding outbound alias above.
-                                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+                                               log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                return Err(DecodeError::InvalidValue);
                                        }
                                        if chan.context.is_usable() {
                                                if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
                                                        // Note that in rare cases its possible to hit this while reading an older
                                                        // channel if we just happened to pick a colliding outbound alias above.
-                                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+                                                       log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                        return Err(DecodeError::InvalidValue);
                                                }
                                        }
@@ -10897,12 +10991,13 @@ where
                                                // without the new monitor persisted - we'll end up right back here on
                                                // restart.
                                                let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
-                                               if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id){
+                                               if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
                                                        let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
-                                                               channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+                                                               let logger = WithChannelContext::from(&args.logger, &channel.context);
+                                                               channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
                                                        }
                                                }
                                                if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
@@ -10923,14 +11018,15 @@ where
 
                for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
                        if let Some(peer_state) = per_peer_state.get(&node_id) {
-                               for (_, actions) in monitor_update_blocked_actions.iter() {
+                               for (channel_id, actions) in monitor_update_blocked_actions.iter() {
+                                       let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id));
                                        for action in actions.iter() {
                                                if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                        downstream_counterparty_and_funding_outpoint:
                                                                Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
                                                } = action {
                                                        if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
-                                                               log_trace!(args.logger,
+                                                               log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
                                                                        blocked_channel_outpoint.to_channel_id());
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
@@ -10951,7 +11047,7 @@ where
                                }
                                peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
                        } else {
-                               log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
+                               log_error!(WithContext::from(&args.logger, Some(node_id), None), "Got blocked actions without a per-peer-state for {}", node_id);
                                return Err(DecodeError::InvalidValue);
                        }
                }
@@ -10973,7 +11069,7 @@ where
                        forward_htlcs: Mutex::new(forward_htlcs),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
-                       id_to_peer: Mutex::new(id_to_peer),
+                       outpoint_to_peer: Mutex::new(outpoint_to_peer),
                        short_to_chan_info: FairRwLock::new(short_to_chan_info),
                        fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
 
@@ -11038,13 +11134,14 @@ mod tests {
        use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
        use crate::ln::ChannelId;
-       use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
-       use crate::ln::features::{ChannelFeatures, NodeFeatures};
+       use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::{self, ErrorAction};
        use crate::ln::msgs::ChannelMessageHandler;
-       use crate::routing::router::{Path, PaymentParameters, RouteHop, RouteParameters, find_route};
+       use crate::prelude::*;
+       use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
        use crate::util::errors::APIError;
+       use crate::util::ser::Writeable;
        use crate::util::test_utils;
        use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
        use crate::sign::EntropySource;
@@ -11591,8 +11688,8 @@ mod tests {
        }
 
        #[test]
-       fn test_id_to_peer_coverage() {
-               // Test that the `ChannelManager:id_to_peer` contains channels which have been assigned
+       fn test_outpoint_to_peer_coverage() {
+               // Test that the `ChannelManager:outpoint_to_peer` contains channels which have been assigned
                // a `channel_id` (i.e. have had the funding tx created), and that they are removed once
                // the channel is successfully closed.
                let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -11606,42 +11703,42 @@ mod tests {
                let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
                nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
-               let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+               let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
                let channel_id = ChannelId::from_bytes(tx.txid().to_byte_array());
                {
-                       // Ensure that the `id_to_peer` map is empty until either party has received the
+                       // Ensure that the `outpoint_to_peer` map is empty until either party has received the
                        // funding transaction, and have the real `channel_id`.
-                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
-                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
                }
 
                nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
                {
-                       // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as
+                       // Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon as
                        // as it has the funding transaction.
-                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(&channel_id));
+                       assert!(nodes_0_lock.contains_key(&funding_output));
                }
 
-               assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
 
                let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 
                nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
                {
-                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(&channel_id));
+                       assert!(nodes_0_lock.contains_key(&funding_output));
                }
                expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
                {
-                       // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
-                       // as it has the funding transaction.
-                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       // Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as
+                       // soon as it has the funding transaction.
+                       let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(&channel_id));
+                       assert!(nodes_1_lock.contains_key(&funding_output));
                }
                check_added_monitors!(nodes[1], 1);
                let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
@@ -11660,23 +11757,23 @@ mod tests {
                let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
                {
-                       // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+                       // Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the
                        // channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the
                        // fee for the closing transaction has been negotiated, and each party has the other
                        // party's signature for the fee-negotiated closing transaction.)
-                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(&channel_id));
+                       assert!(nodes_0_lock.contains_key(&funding_output));
                }
 
                {
                        // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
                        // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
                        // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
-                       // kept in the `nodes[1]`'s `id_to_peer` map.
-                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       // kept in the `nodes[1]`'s `outpoint_to_peer` map.
+                       let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(&channel_id));
+                       assert!(nodes_1_lock.contains_key(&funding_output));
                }
 
                nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
@@ -11684,24 +11781,24 @@ mod tests {
                        // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
                        // therefore has all it needs to fully close the channel (both signatures for the
                        // closing transaction).
-                       // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+                       // Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be
                        // fully closed by `nodes[0]`.
-                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
 
-                       // Assert that the channel is still in `nodes[1]`'s  `id_to_peer` map, as `nodes[1]`
+                       // Assert that the channel is still in `nodes[1]`'s `outpoint_to_peer` map, as `nodes[1]`
                        // doesn't have `nodes[0]`'s signature for the closing transaction yet.
-                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(&channel_id));
+                       assert!(nodes_1_lock.contains_key(&funding_output));
                }
 
                let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
                {
-                       // Assert that the channel has now been removed from both parties `id_to_peer` map once
+                       // Assert that the channel has now been removed from both parties' `outpoint_to_peer` map once
                        // they both have everything required to fully close the channel.
-                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
                }
                let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 
@@ -12321,134 +12418,60 @@ mod tests {
        }
 
        #[test]
-       fn test_peel_payment_onion() {
-               use super::*;
-               let secp_ctx = Secp256k1::new();
-
-               let bob = crate::sign::KeysManager::new(&[2; 32], 42, 42);
-               let bob_pk = PublicKey::from_secret_key(&secp_ctx, &bob.get_node_secret_key());
-               let charlie = crate::sign::KeysManager::new(&[3; 32], 42, 42);
-               let charlie_pk = PublicKey::from_secret_key(&secp_ctx, &charlie.get_node_secret_key());
-
-               let (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
-                       prng_seed, hops, recipient_amount, pay_secret) = payment_onion_args(bob_pk, charlie_pk);
-
-               let path = Path {
-                       hops: hops,
-                       blinded_tail: None,
+       fn test_malformed_forward_htlcs_ser() {
+               // Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
+               let chanmon_cfg = create_chanmon_cfgs(1);
+               let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+               let persister;
+               let chain_monitor;
+               let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
+               let deserialized_chanmgr;
+               let mut nodes = create_network(1, &node_cfg, &chanmgrs);
+
+               let dummy_failed_htlc = |htlc_id| {
+                       HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
+               };
+               let dummy_malformed_htlc = |htlc_id| {
+                       HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
                };
 
-               let (amount_msat, cltv_expiry, onion) = create_payment_onion(
-                       &secp_ctx, &path, &session_priv, total_amt_msat, recipient_onion, cur_height,
-                       payment_hash, Some(preimage), prng_seed
-               ).unwrap();
+               let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+                       if htlc_id % 2 == 0 {
+                               dummy_failed_htlc(htlc_id)
+                       } else {
+                               dummy_malformed_htlc(htlc_id)
+                       }
+               }).collect();
 
-               let msg = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, onion);
-               let logger = test_utils::TestLogger::with_id("bob".to_string());
+               let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+                       if htlc_id % 2 == 1 {
+                               dummy_failed_htlc(htlc_id)
+                       } else {
+                               dummy_malformed_htlc(htlc_id)
+                       }
+               }).collect();
 
-               let peeled = peel_payment_onion(&msg, &&bob, &&logger, &secp_ctx, cur_height, true)
-                       .map_err(|e| e.msg).unwrap();
 
-               let next_onion = match peeled.routing {
-                       PendingHTLCRouting::Forward { onion_packet, short_channel_id: _ } => {
-                               onion_packet
-                       },
-                       _ => panic!("expected a forwarded onion"),
-               };
+               let (scid_1, scid_2) = (42, 43);
+               let mut forward_htlcs = HashMap::new();
+               forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
+               forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
 
-               let msg2 = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, next_onion);
-               let peeled2 = peel_payment_onion(&msg2, &&charlie, &&logger, &secp_ctx, cur_height, true)
-                       .map_err(|e| e.msg).unwrap();
-
-               match peeled2.routing {
-                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_data, incoming_cltv_expiry, .. } => {
-                               assert_eq!(payment_preimage, preimage);
-                               assert_eq!(peeled2.outgoing_amt_msat, recipient_amount);
-                               assert_eq!(incoming_cltv_expiry, peeled2.outgoing_cltv_value);
-                               let msgs::FinalOnionHopData{total_msat, payment_secret} = payment_data.unwrap();
-                               assert_eq!(total_msat, total_amt_msat);
-                               assert_eq!(payment_secret, pay_secret);
-                       },
-                       _ => panic!("expected a received keysend"),
-               };
-       }
+               let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+               *chanmgr_fwd_htlcs = forward_htlcs.clone();
+               core::mem::drop(chanmgr_fwd_htlcs);
+
+               reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
 
-       fn make_update_add_msg(
-               amount_msat: u64, cltv_expiry: u32, payment_hash: PaymentHash,
-               onion_routing_packet: msgs::OnionPacket
-       ) -> msgs::UpdateAddHTLC {
-               msgs::UpdateAddHTLC {
-                       channel_id: ChannelId::from_bytes([0; 32]),
-                       htlc_id: 0,
-                       amount_msat,
-                       cltv_expiry,
-                       payment_hash,
-                       onion_routing_packet,
-                       skimmed_fee_msat: None,
+               let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+               for scid in [scid_1, scid_2].iter() {
+                       let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
+                       assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
                }
-       }
+               assert!(deserialized_fwd_htlcs.is_empty());
+               core::mem::drop(deserialized_fwd_htlcs);
 
-       fn payment_onion_args(hop_pk: PublicKey, recipient_pk: PublicKey) -> (
-               SecretKey, u64, u32, RecipientOnionFields, PaymentPreimage, PaymentHash, [u8; 32],
-               Vec<RouteHop>, u64, PaymentSecret,
-       ) {
-               let session_priv_bytes = [42; 32];
-               let session_priv = SecretKey::from_slice(&session_priv_bytes).unwrap();
-               let total_amt_msat = 1000;
-               let cur_height = 1000;
-               let pay_secret = PaymentSecret([99; 32]);
-               let recipient_onion = RecipientOnionFields::secret_only(pay_secret);
-               let preimage_bytes = [43; 32];
-               let preimage = PaymentPreimage(preimage_bytes);
-               let rhash_bytes = Sha256::hash(&preimage_bytes).to_byte_array();
-               let payment_hash = PaymentHash(rhash_bytes);
-               let prng_seed = [44; 32];
-
-               // make a route alice -> bob -> charlie
-               let hop_fee = 1;
-               let recipient_amount = total_amt_msat - hop_fee;
-               let hops = vec![
-                       RouteHop {
-                               pubkey: hop_pk,
-                               fee_msat: hop_fee,
-                               cltv_expiry_delta: 42,
-                               short_channel_id: 1,
-                               node_features: NodeFeatures::empty(),
-                               channel_features: ChannelFeatures::empty(),
-                               maybe_announced_channel: false,
-                       },
-                       RouteHop {
-                               pubkey: recipient_pk,
-                               fee_msat: recipient_amount,
-                               cltv_expiry_delta: 42,
-                               short_channel_id: 2,
-                               node_features: NodeFeatures::empty(),
-                               channel_features: ChannelFeatures::empty(),
-                               maybe_announced_channel: false,
-                       }
-               ];
-
-               (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
-                       prng_seed, hops, recipient_amount, pay_secret)
-       }
-
-       pub fn create_payment_onion<T: bitcoin::secp256k1::Signing>(
-               secp_ctx: &Secp256k1<T>, path: &Path, session_priv: &SecretKey, total_msat: u64,
-               recipient_onion: RecipientOnionFields, best_block_height: u32, payment_hash: PaymentHash,
-               keysend_preimage: Option<PaymentPreimage>, prng_seed: [u8; 32]
-       ) -> Result<(u64, u32, msgs::OnionPacket), ()> {
-               let onion_keys = super::onion_utils::construct_onion_keys(&secp_ctx, &path, &session_priv).map_err(|_| ())?;
-               let (onion_payloads, htlc_msat, htlc_cltv) = super::onion_utils::build_onion_payloads(
-                       &path,
-                       total_msat,
-                       recipient_onion,
-                       best_block_height + 1,
-                       &keysend_preimage,
-               ).map_err(|_| ())?;
-               let onion_packet = super::onion_utils::construct_onion_packet(
-                       onion_payloads, onion_keys, prng_seed, &payment_hash
-               )?;
-               Ok((htlc_msat, htlc_cltv, onion_packet))
+               expect_pending_htlcs_forwardable!(nodes[0]);
        }
 }
 
index d10c3a71927b254854fd11869f1d9cbaedf7cb35..2e732b17aa8bc8a88f923942ce8e47f0f3807b82 100644 (file)
 //!     (see [BOLT-4](https://github.com/lightning/bolts/blob/master/04-onion-routing.md#basic-multi-part-payments) for more information).
 //! - `Wumbo` - requires/supports that a node create large channels. Called `option_support_large_channel` in the spec.
 //!     (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-open_channel-message) for more information).
+//! - `AnchorsZeroFeeHtlcTx` - requires/supports that commitment transactions include anchor outputs
+//!     and HTLC transactions are pre-signed with zero fee (see
+//!     [BOLT-3](https://github.com/lightning/bolts/blob/master/03-transactions.md) for more
+//!     information).
+//! - `RouteBlinding` - requires/supports that a node can relay payments over blinded paths
+//!     (see [BOLT-4](https://github.com/lightning/bolts/blob/master/04-onion-routing.md#route-blinding) for more information).
 //! - `ShutdownAnySegwit` - requires/supports that future segwit versions are allowed in `shutdown`
 //!     (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md) for more information).
 //! - `OnionMessages` - requires/supports forwarding onion messages
 //!      for more info).
 //! - `Keysend` - send funds to a node without an invoice
 //!     (see the [`Keysend` feature assignment proposal](https://github.com/lightning/bolts/issues/605#issuecomment-606679798) for more information).
-//! - `AnchorsZeroFeeHtlcTx` - requires/supports that commitment transactions include anchor outputs
-//!     and HTLC transactions are pre-signed with zero fee (see
-//!     [BOLT-3](https://github.com/lightning/bolts/blob/master/03-transactions.md) for more
-//!     information).
 //!
 //! LDK knows about the following features, but does not support them:
 //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
@@ -143,7 +145,7 @@ mod sealed {
                // Byte 2
                BasicMPP | Wumbo | AnchorsNonzeroFeeHtlcTx | AnchorsZeroFeeHtlcTx,
                // Byte 3
-               ShutdownAnySegwit | Taproot,
+               RouteBlinding | ShutdownAnySegwit | Taproot,
                // Byte 4
                OnionMessages,
                // Byte 5
@@ -159,7 +161,7 @@ mod sealed {
                // Byte 2
                BasicMPP | Wumbo | AnchorsNonzeroFeeHtlcTx | AnchorsZeroFeeHtlcTx,
                // Byte 3
-               ShutdownAnySegwit | Taproot,
+               RouteBlinding | ShutdownAnySegwit | Taproot,
                // Byte 4
                OnionMessages,
                // Byte 5
@@ -391,6 +393,9 @@ mod sealed {
        define_feature!(23, AnchorsZeroFeeHtlcTx, [InitContext, NodeContext, ChannelTypeContext],
                "Feature flags for `option_anchors_zero_fee_htlc_tx`.", set_anchors_zero_fee_htlc_tx_optional,
                set_anchors_zero_fee_htlc_tx_required, supports_anchors_zero_fee_htlc_tx, requires_anchors_zero_fee_htlc_tx);
+       define_feature!(25, RouteBlinding, [InitContext, NodeContext],
+               "Feature flags for `option_route_blinding`.", set_route_blinding_optional,
+               set_route_blinding_required, supports_route_blinding, requires_route_blinding);
        define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
                "Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
                set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
@@ -464,12 +469,24 @@ impl<T: sealed::Context> Clone for Features<T> {
 }
 impl<T: sealed::Context> Hash for Features<T> {
        fn hash<H: Hasher>(&self, hasher: &mut H) {
-               self.flags.hash(hasher);
+               let mut nonzero_flags = &self.flags[..];
+               while nonzero_flags.last() == Some(&0) {
+                       nonzero_flags = &nonzero_flags[..nonzero_flags.len() - 1];
+               }
+               nonzero_flags.hash(hasher);
        }
 }
 impl<T: sealed::Context> PartialEq for Features<T> {
        fn eq(&self, o: &Self) -> bool {
-               self.flags.eq(&o.flags)
+               let mut o_iter = o.flags.iter();
+               let mut self_iter = self.flags.iter();
+               loop {
+                       match (o_iter.next(), self_iter.next()) {
+                               (Some(o), Some(us)) => if o != us { return false },
+                               (Some(b), None) | (None, Some(b)) => if *b != 0 { return false },
+                               (None, None) => return true,
+                       }
+               }
        }
 }
 impl<T: sealed::Context> PartialOrd for Features<T> {
@@ -1053,6 +1070,7 @@ mod tests {
                init_features.set_basic_mpp_optional();
                init_features.set_wumbo_optional();
                init_features.set_anchors_zero_fee_htlc_tx_optional();
+               init_features.set_route_blinding_optional();
                init_features.set_shutdown_any_segwit_optional();
                init_features.set_onion_messages_optional();
                init_features.set_channel_type_optional();
@@ -1068,8 +1086,8 @@ mod tests {
                        // Check that the flags are as expected:
                        // - option_data_loss_protect (req)
                        // - var_onion_optin (req) | static_remote_key (req) | payment_secret(req)
-                       // - basic_mpp | wumbo | anchors_zero_fee_htlc_tx
-                       // - opt_shutdown_anysegwit
+                       // - basic_mpp | wumbo | option_anchors_zero_fee_htlc_tx
+                       // - option_route_blinding | opt_shutdown_anysegwit
                        // - onion_messages
                        // - option_channel_type | option_scid_alias
                        // - option_zeroconf
@@ -1077,7 +1095,7 @@ mod tests {
                        assert_eq!(node_features.flags[0], 0b00000001);
                        assert_eq!(node_features.flags[1], 0b01010001);
                        assert_eq!(node_features.flags[2], 0b10001010);
-                       assert_eq!(node_features.flags[3], 0b00001000);
+                       assert_eq!(node_features.flags[3], 0b00001010);
                        assert_eq!(node_features.flags[4], 0b10000000);
                        assert_eq!(node_features.flags[5], 0b10100000);
                        assert_eq!(node_features.flags[6], 0b00001000);
@@ -1209,4 +1227,26 @@ mod tests {
                assert!(!converted_features.supports_any_optional_bits());
                assert!(converted_features.requires_static_remote_key());
        }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_excess_zero_bytes_ignored() {
+               // Checks that `Hash` and `PartialEq` ignore excess zero bytes, which may appear due to
+               // feature conversion or because a peer serialized their feature poorly.
+               use std::collections::hash_map::DefaultHasher;
+               use std::hash::{Hash, Hasher};
+
+               let mut zerod_features = InitFeatures::empty();
+               zerod_features.flags = vec![0];
+               let empty_features = InitFeatures::empty();
+               assert!(empty_features.flags.is_empty());
+
+               assert_eq!(zerod_features, empty_features);
+
+               let mut zerod_hash = DefaultHasher::new();
+               zerod_features.hash(&mut zerod_hash);
+               let mut empty_hash = DefaultHasher::new();
+               empty_features.hash(&mut empty_hash);
+               assert_eq!(zerod_hash.finish(), empty_hash.finish());
+       }
 }
index d3d2e3322cafb79f58573d6e18aa7deb84672709..a2d9631716c4c23098135a8f95ee37a1fc5a6949 100644 (file)
@@ -1536,6 +1536,18 @@ pub struct ExpectedCloseEvent {
        pub reason: Option<ClosureReason>,
 }
 
+impl ExpectedCloseEvent {
+       pub fn from_id_reason(channel_id: ChannelId, discard_funding: bool, reason: ClosureReason) -> Self {
+               Self {
+                       channel_capacity_sats: None,
+                       channel_id: Some(channel_id),
+                       counterparty_node_id: None,
+                       discard_funding,
+                       reason: Some(reason),
+               }
+       }
+}
+
 /// Check that multiple channel closing events have been issued.
 pub fn check_closed_events(node: &Node, expected_close_events: &[ExpectedCloseEvent]) {
        let closed_events_count = expected_close_events.len();
@@ -1971,6 +1983,18 @@ pub fn get_route(send_node: &Node, route_params: &RouteParameters) -> Result<Rou
        )
 }
 
+/// Like `get_route` above, but adds a random CLTV offset to the final hop.
+pub fn find_route(send_node: &Node, route_params: &RouteParameters) -> Result<Route, msgs::LightningError> {
+       let scorer = TestScorer::new();
+       let keys_manager = TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
+       let random_seed_bytes = keys_manager.get_secure_random_bytes();
+       router::find_route(
+               &send_node.node.get_our_node_id(), route_params, &send_node.network_graph,
+               Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
+               send_node.logger, &scorer, &Default::default(), &random_seed_bytes
+       )
+}
+
 /// Gets a route from the given sender to the node described in `payment_params`.
 ///
 /// Don't use this, use the identically-named function instead.
index 450bc482c1a2dcbcf031be1f5f36a3304c945953..2ad53faa8b2e129874cbec1391156ab58aabc6e5 100644 (file)
@@ -17,7 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
@@ -693,7 +693,7 @@ fn test_update_fee_that_funder_cannot_afford() {
                *feerate_lock += 4;
        }
        nodes[0].node.timer_tick_occurred();
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
        check_added_monitors!(nodes[0], 0);
 
        const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
@@ -746,7 +746,7 @@ fn test_update_fee_that_funder_cannot_afford() {
                        &mut htlcs,
                        &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
-               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
        };
 
        let commit_signed_msg = msgs::CommitmentSigned {
@@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
        //Should produce an error.
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
@@ -1415,6 +1415,7 @@ fn test_fee_spike_violation_fails_htlc() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -1493,7 +1494,7 @@ fn test_fee_spike_violation_fails_htlc() {
                        &mut vec![(accepted_htlc_info, ())],
                        &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
-               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), Vec::new(), &secp_ctx).unwrap()
        };
 
        let commit_signed_msg = msgs::CommitmentSigned {
@@ -1528,7 +1529,7 @@ fn test_fee_spike_violation_fails_htlc() {
                },
                _ => panic!("Unexpected event"),
        };
-       nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
+       nodes[1].logger.assert_log("lightning::ln::channel",
                format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
 
        check_added_monitors!(nodes[1], 2);
@@ -1611,11 +1612,12 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+       nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
@@ -1789,11 +1791,12 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
@@ -2270,9 +2273,15 @@ fn channel_monitor_network_test() {
        nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
+               mine_transaction(&nodes[1], &node_txn[0]);
+               if nodes[1].connect_style.borrow().updates_best_block_first() {
+                       let _ = nodes[1].tx_broadcaster.txn_broadcast();
+               }
+
                mine_transaction(&nodes[0], &node_txn[0]);
                check_added_monitors!(nodes[0], 1);
                test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
@@ -2281,7 +2290,6 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 
        // One pending HTLC is discarded by the force-close:
        let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -2590,8 +2598,8 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment:
        // that a revoked commitment transaction is broadcasted
        // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually)
        let chanmon_cfgs = create_chanmon_cfgs(2);
-       let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script().unwrap();
-       let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script().unwrap();
+       let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap();
+       let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap();
        let persisters = vec![WatchtowerPersister::new(destination_script0),
                WatchtowerPersister::new(destination_script1)];
        let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
@@ -3510,6 +3518,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                        cltv_expiry,
                        onion_routing_packet,
                        skimmed_fee_msat: None,
+                       blinding_point: None,
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
@@ -3552,7 +3561,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
                // connect_style.
                return;
        }
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
 
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
        nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -3561,11 +3570,12 @@ fn test_htlc_ignore_latest_remote_commitment() {
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(node_txn.len(), 3);
-       assert_eq!(node_txn[0].txid(), node_txn[1].txid());
+       let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+       assert_eq!(node_txn.len(), 2);
+       check_spends!(node_txn[0], funding_tx);
+       check_spends!(node_txn[1], node_txn[0]);
 
-       let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
+       let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
        connect_block(&nodes[1], &block);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
@@ -3622,7 +3632,7 @@ fn test_force_close_fail_back() {
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
-       let tx = {
+       let commitment_tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
                // have a use for it unless nodes[2] learns the preimage somehow, the funds will go
@@ -3631,7 +3641,7 @@ fn test_force_close_fail_back() {
                node_txn.remove(0)
        };
 
-       mine_transaction(&nodes[1], &tx);
+       mine_transaction(&nodes[1], &commitment_tx);
 
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
@@ -3643,15 +3653,16 @@ fn test_force_close_fail_back() {
                get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
                        .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
        }
-       mine_transaction(&nodes[2], &tx);
-       let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
-       assert_eq!(node_txn.len(), 1);
-       assert_eq!(node_txn[0].input.len(), 1);
-       assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
-       assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
-       assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+       mine_transaction(&nodes[2], &commitment_tx);
+       let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
+       assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+       let htlc_tx = node_txn.pop().unwrap();
+       assert_eq!(htlc_tx.input.len(), 1);
+       assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
+       assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
+       assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
 
-       check_spends!(node_txn[0], tx);
+       check_spends!(htlc_tx, commitment_tx);
 }
 
 #[test]
@@ -5926,7 +5937,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
        // us to surface its failure to the user.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -6014,7 +6025,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        // to surface its failure to the user. The first payment should succeed.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the second payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -6288,7 +6299,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        updates.update_add_htlcs[0].amount_msat = 0;
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
@@ -6481,6 +6492,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet.clone(),
                skimmed_fee_msat: None,
+               blinding_point: None,
        };
 
        for i in 0..50 {
@@ -8563,10 +8575,11 @@ fn test_concurrent_monitor_claim() {
        watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
 
        // Watchtower Alice should have broadcast a commitment/HTLC-timeout
-       let alice_state = {
+       {
                let mut txn = alice_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 2);
-               txn.remove(0)
+               check_spends!(txn[0], chan_1.3);
+               check_spends!(txn[1], txn[0]);
        };
 
        // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
@@ -8635,11 +8648,8 @@ fn test_concurrent_monitor_claim() {
        check_added_monitors(&nodes[0], 1);
        {
                let htlc_txn = alice_broadcaster.txn_broadcast();
-               assert_eq!(htlc_txn.len(), 2);
+               assert_eq!(htlc_txn.len(), 1);
                check_spends!(htlc_txn[0], bob_state_y);
-               // Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
-               // it. However, she should, because it now has an invalid parent.
-               check_spends!(htlc_txn[1], alice_state);
        }
 }
 
@@ -8878,7 +8888,12 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                        assert_eq!(bob_txn.len(), 1);
                        check_spends!(bob_txn[0], txn_to_broadcast[0]);
                } else {
-                       assert_eq!(bob_txn.len(), 2);
+                       if nodes[1].connect_style.borrow().updates_best_block_first() {
+                               assert_eq!(bob_txn.len(), 3);
+                               assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
+                       } else {
+                               assert_eq!(bob_txn.len(), 2);
+                       }
                        check_spends!(bob_txn[0], chan_ab.3);
                }
        }
@@ -8894,15 +8909,16 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
                // Bob force-closed and broadcasts the commitment transaction along with a
                // HTLC-output-claiming transaction.
-               let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+               let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
                if broadcast_alice {
                        assert_eq!(bob_txn.len(), 1);
                        check_spends!(bob_txn[0], txn_to_broadcast[0]);
                        assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
                } else {
-                       assert_eq!(bob_txn.len(), 2);
-                       check_spends!(bob_txn[1], txn_to_broadcast[0]);
-                       assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
+                       assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
+                       let htlc_tx = bob_txn.pop().unwrap();
+                       check_spends!(htlc_tx, txn_to_broadcast[0]);
+                       assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
                }
        }
 }
@@ -8965,6 +8981,54 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
        }
 }
 
+#[test]
+fn test_duplicate_funding_err_in_funding() {
+       // Test that if we have a live channel with one peer, then another peer comes along and tries
+       // to create a second channel with the same txid we'll fail and not overwrite the
+       // outpoint_to_peer map in `ChannelManager`.
+       //
+       // This was previously broken.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
+       let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
+       assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
+
+       nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+       let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       let node_c_temp_chan_id = open_chan_msg.temporary_channel_id;
+       open_chan_msg.temporary_channel_id = real_channel_id;
+       nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
+       let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
+       accept_chan_msg.temporary_channel_id = node_c_temp_chan_id;
+       nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
+
+       // Now that we have a second channel with the same funding txo, send a bogus funding message
+       // and let nodes[1] remove the inbound channel.
+       let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
+
+       nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
+
+       let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       funding_created_msg.temporary_channel_id = real_channel_id;
+       // Make the signature invalid by changing the funding output
+       funding_created_msg.funding_output_index += 10;
+       nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
+       get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+       let err = "Invalid funding_created signature from peer".to_owned();
+       let reason = ClosureReason::ProcessingError { err };
+       let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
+       check_closed_events(&nodes[1], &[expected_closing]);
+
+       assert_eq!(
+               *nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
+               nodes[0].node.get_our_node_id()
+       );
+}
+
 #[test]
 fn test_duplicate_chan_id() {
        // Test that if a given peer tries to open a channel with the same channel_id as one that is
@@ -9054,7 +9118,7 @@ fn test_duplicate_chan_id() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
        create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
 
-       let (_, funding_created) = {
+       let funding_created = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                // Once we call `get_funding_created` the channel has a duplicate channel_id as
@@ -9062,7 +9126,7 @@ fn test_duplicate_chan_id() {
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
                match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
-                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                       ChannelPhase::UnfundedOutboundV1(mut chan) => {
                                let logger = test_utils::TestLogger::new();
                                chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
                        },
@@ -9075,6 +9139,12 @@ fn test_duplicate_chan_id() {
        // without trying to persist the `ChannelMonitor`.
        check_added_monitors!(nodes[1], 0);
 
+       check_closed_events(&nodes[1], &[
+               ExpectedCloseEvent::from_id_reason(channel_id, false, ClosureReason::ProcessingError {
+                       err: "Already had channel with the new channel_id".to_owned()
+               })
+       ]);
+
        // ...still, nodes[1] will reject the duplicate channel.
        {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -9378,8 +9448,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // We should broadcast an HTLC transaction spending our funding transaction first
                let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(spending_txn.len(), 2);
-               assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
-               check_spends!(spending_txn[1], node_txn[0]);
+               let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
+                       &spending_txn[1]
+               } else {
+                       &spending_txn[0]
+               };
+               check_spends!(htlc_tx, node_txn[0]);
                // We should also generate a SpendableOutputs event with the to_self output (as its
                // timelock is up).
                let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -9389,7 +9463,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
-                       &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
+                       &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
                expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
                check_added_monitors!(nodes[1], 1);
 
@@ -9838,10 +9912,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                        // Outbound dust balance: 6399 sats
                        let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
                        let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
                } else {
                        // Outbound dust balance: 5200 sats
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
+                       nodes[0].logger.assert_log("lightning::ln::channel",
                                format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
                                        dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
                                        max_dust_htlc_exposure_msat), 1);
index fb809041af5a491e91efc612880a0ad128676ab9..43ec34eaf610fae5253516f7febde751ba6016b0 100644 (file)
@@ -13,6 +13,7 @@
 #[macro_use]
 pub mod functional_test_utils;
 
+pub mod onion_payment;
 pub mod channelmanager;
 pub mod channel_keys;
 pub mod inbound_payment;
@@ -75,7 +76,7 @@ mod monitor_tests;
 #[cfg(test)]
 #[allow(unused_mut)]
 mod shutdown_tests;
-#[cfg(test)]
+#[cfg(all(test, async_signing))]
 #[allow(unused_mut)]
 mod async_signer_tests;
 
index ad5f03a5cf6b0c4255421a1ceb12e40b58e9575b..74740a6f2279d6549ba57b443b473410e26fbef6 100644 (file)
@@ -9,7 +9,7 @@
 
 //! Further functional tests which test blockchain reorganizations.
 
-use crate::sign::{EcdsaChannelSigner, SpendableOutputDescriptor};
+use crate::sign::{ecdsa::EcdsaChannelSigner, SpendableOutputDescriptor};
 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance};
 use crate::chain::transaction::OutPoint;
 use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
@@ -737,7 +737,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) {
                commitment_tx
        };
        let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &commitment_tx));
-       if anchors && nodes[0].connect_style.borrow().updates_best_block_first() {
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
                let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 1);
                assert_eq!(txn[0].txid(), commitment_tx.txid());
@@ -1998,6 +1998,11 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
        };
 
        mine_transaction(&nodes[0], &commitment_tx);
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
+               let txn = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               assert_eq!(txn[0].txid(), commitment_tx.txid());
+       }
 
        // Connect blocks until the HTLC's expiration is met, expecting a transaction broadcast.
        connect_blocks(&nodes[0], TEST_FINAL_CLTV);
@@ -2401,26 +2406,12 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
        nodes[1].node.timer_tick_occurred();
        check_added_monitors(&nodes[1], 2);
        check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager, [nodes[0].node.get_our_node_id(); 2], 1000000);
-       let (revoked_commitment_a, revoked_commitment_b) = {
-               let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
-               assert_eq!(txn.len(), 2);
-               assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-               assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-               if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
-                       check_spends!(&txn[0], &chan_a.3);
-                       check_spends!(&txn[1], &chan_b.3);
-                       (txn[0].clone(), txn[1].clone())
-               } else {
-                       check_spends!(&txn[1], &chan_a.3);
-                       check_spends!(&txn[0], &chan_b.3);
-                       (txn[1].clone(), txn[0].clone())
-               }
-       };
 
        // Bob should now receive two events to bump his revoked commitment transaction fees.
        assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
        let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
+       let mut revoked_commitment_txs = Vec::with_capacity(events.len());
        let mut anchor_txs = Vec::with_capacity(events.len());
        for (idx, event) in events.into_iter().enumerate() {
                let utxo_value = Amount::ONE_BTC.to_sat() * (idx + 1) as u64;
@@ -2440,13 +2431,21 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                };
                let txn = nodes[1].tx_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 2);
+               assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+               if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+                       check_spends!(&txn[0], &chan_a.3);
+               } else {
+                       check_spends!(&txn[0], &chan_b.3);
+               }
                let (commitment_tx, anchor_tx) = (&txn[0], &txn[1]);
                check_spends!(anchor_tx, coinbase_tx, commitment_tx);
+
+               revoked_commitment_txs.push(commitment_tx.clone());
                anchor_txs.push(anchor_tx.clone());
        };
 
        for node in &nodes {
-               mine_transactions(node, &[&revoked_commitment_a, &anchor_txs[0], &revoked_commitment_b, &anchor_txs[1]]);
+               mine_transactions(node, &[&revoked_commitment_txs[0], &anchor_txs[0], &revoked_commitment_txs[1], &anchor_txs[1]]);
        }
        check_added_monitors!(&nodes[0], 2);
        check_closed_broadcast(&nodes[0], 2, true);
@@ -2458,7 +2457,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(txn.len(), 4);
 
-               let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
+               let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_txs[0].txid() {
                        (if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] }, if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] })
                } else {
                        (if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] }, if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] })
@@ -2466,10 +2465,10 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 
                assert_eq!(revoked_htlc_claim_a.input.len(), 2); // Spends both HTLC outputs
                assert_eq!(revoked_htlc_claim_a.output.len(), 1);
-               check_spends!(revoked_htlc_claim_a, revoked_commitment_a);
+               check_spends!(revoked_htlc_claim_a, revoked_commitment_txs[0]);
                assert_eq!(revoked_htlc_claim_b.input.len(), 2); // Spends both HTLC outputs
                assert_eq!(revoked_htlc_claim_b.output.len(), 1);
-               check_spends!(revoked_htlc_claim_b, revoked_commitment_b);
+               check_spends!(revoked_htlc_claim_b, revoked_commitment_txs[1]);
        }
 
        // Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
@@ -2549,7 +2548,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                        sig
                };
                htlc_tx.input[0].witness = Witness::from_slice(&[fee_utxo_sig, public_key.to_bytes()]);
-               check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
+               check_spends!(htlc_tx, coinbase_tx, revoked_commitment_txs[0], revoked_commitment_txs[1]);
                htlc_tx
        };
 
@@ -2608,7 +2607,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                        ).unwrap();
 
                        if let SpendableOutputDescriptor::StaticPaymentOutput(_) = &outputs[0] {
-                               check_spends!(spend_tx, &revoked_commitment_a, &revoked_commitment_b);
+                               check_spends!(spend_tx, &revoked_commitment_txs[0], &revoked_commitment_txs[1]);
                        } else {
                                check_spends!(spend_tx, revoked_claim_transactions.get(&spend_tx.input[0].previous_output.txid).unwrap());
                        }
@@ -2778,7 +2777,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
 
        // If we update the best block to the new height before providing the confirmed transactions,
        // we'll see another broadcast of the commitment transaction.
-       if anchors && !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
+       if !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
                let _ = nodes[0].tx_broadcaster.txn_broadcast();
        }
 
@@ -2796,11 +2795,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
        let htlc_timeout_tx = {
                let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 1);
-               let tx = if txn[0].input[0].previous_output.txid == commitment_tx.txid() {
-                       txn[0].clone()
-               } else {
-                       txn[1].clone()
-               };
+               let tx = txn.pop().unwrap();
                check_spends!(tx, commitment_tx, coinbase_tx);
                tx
        };
index d5529e98ba013c4f8d2118edb5de4bebb82470df..2e56e2fc2c0c95d2e3cb3380d1f8b50138404ead 100644 (file)
@@ -31,7 +31,7 @@ use bitcoin::{secp256k1, Witness};
 use bitcoin::blockdata::script::ScriptBuf;
 use bitcoin::hash_types::Txid;
 
-use crate::blinded_path::payment::ReceiveTlvs;
+use crate::blinded_path::payment::{BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 use crate::ln::onion_utils;
@@ -52,7 +52,7 @@ use core::fmt::Display;
 use crate::io::{self, Cursor, Read};
 use crate::io_extras::read_to_end;
 
-use crate::events::MessageSendEventsProvider;
+use crate::events::{EventsProvider, MessageSendEventsProvider};
 use crate::util::chacha20poly1305rfc::ChaChaPolyReadAdapter;
 use crate::util::logger;
 use crate::util::ser::{LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
@@ -680,7 +680,11 @@ pub struct UpdateAddHTLC {
        ///
        /// [`ChannelConfig::accept_underpaying_htlcs`]: crate::util::config::ChannelConfig::accept_underpaying_htlcs
        pub skimmed_fee_msat: Option<u64>,
-       pub(crate) onion_routing_packet: OnionPacket,
+       /// The onion routing packet with encrypted data for the next hop.
+       pub onion_routing_packet: OnionPacket,
+       /// Provided if we are relaying or receiving a payment within a blinded path, to decrypt the onion
+       /// routing packet and the recipient-provided encrypted payload within.
+       pub blinding_point: Option<PublicKey>,
 }
 
  /// An onion message to be sent to or received from a peer.
@@ -1627,7 +1631,7 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
 }
 
 /// A handler for received [`OnionMessage`]s and for providing generated ones to send.
-pub trait OnionMessageHandler {
+pub trait OnionMessageHandler: EventsProvider {
        /// Handle an incoming `onion_message` message from the given peer.
        fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage);
 
@@ -1646,6 +1650,10 @@ pub trait OnionMessageHandler {
        /// drop and refuse to forward onion messages to this peer.
        fn peer_disconnected(&self, their_node_id: &PublicKey);
 
+       /// Performs actions that should happen roughly every ten seconds after startup. Allows handlers
+       /// to drop any buffered onion messages intended for prospective peers.
+       fn timer_tick_occurred(&self);
+
        // Handler information:
        /// Gets the node feature flags which this handler itself supports. All available handlers are
        /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
@@ -1660,21 +1668,31 @@ pub trait OnionMessageHandler {
        fn provided_init_features(&self, their_node_id: &PublicKey) -> InitFeatures;
 }
 
+#[derive(Clone)]
+#[cfg_attr(test, derive(Debug, PartialEq))]
+/// Information communicated in the onion to the recipient for multi-part tracking and proof that
+/// the payment is associated with an invoice.
+pub struct FinalOnionHopData {
+       /// When sending a multi-part payment, this secret is used to identify a payment across HTLCs.
+       /// Because it is generated by the recipient and included in the invoice, it also provides
+       /// proof to the recipient that the payment was sent by someone with the generated invoice.
+       pub payment_secret: PaymentSecret,
+       /// The intended total amount that this payment is for.
+       ///
+       /// Message serialization may panic if this value is more than 21 million Bitcoin.
+       pub total_msat: u64,
+}
+
 mod fuzzy_internal_msgs {
        use bitcoin::secp256k1::PublicKey;
-       use crate::blinded_path::payment::PaymentConstraints;
+       use crate::blinded_path::payment::{PaymentConstraints, PaymentRelay};
        use crate::prelude::*;
        use crate::ln::{PaymentPreimage, PaymentSecret};
+       use crate::ln::features::BlindedHopFeatures;
+       use super::FinalOnionHopData;
 
        // These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
        // them from untrusted input):
-       #[derive(Clone)]
-       pub struct FinalOnionHopData {
-               pub payment_secret: PaymentSecret,
-               /// The total value, in msat, of the payment as received by the ultimate recipient.
-               /// Message serialization may panic if this value is more than 21 million Bitcoin.
-               pub total_msat: u64,
-       }
 
        pub enum InboundOnionPayload {
                Forward {
@@ -1691,13 +1709,20 @@ mod fuzzy_internal_msgs {
                        amt_msat: u64,
                        outgoing_cltv_value: u32,
                },
+               BlindedForward {
+                       short_channel_id: u64,
+                       payment_relay: PaymentRelay,
+                       payment_constraints: PaymentConstraints,
+                       features: BlindedHopFeatures,
+                       intro_node_blinding_point: PublicKey,
+               },
                BlindedReceive {
                        amt_msat: u64,
                        total_msat: u64,
                        outgoing_cltv_value: u32,
                        payment_secret: PaymentSecret,
                        payment_constraints: PaymentConstraints,
-                       intro_node_blinding_point: PublicKey,
+                       intro_node_blinding_point: Option<PublicKey>,
                }
        }
 
@@ -2211,6 +2236,7 @@ impl_writeable_msg!(UpdateAddHTLC, {
        cltv_expiry,
        onion_routing_packet,
 }, {
+       (0, blinding_point, option),
        (65537, skimmed_fee_msat, option)
 });
 
@@ -2302,8 +2328,10 @@ impl Writeable for OutboundOnionPayload {
        }
 }
 
-impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: NodeSigner {
-       fn read<R: Read>(r: &mut R, node_signer: &NS) -> Result<Self, DecodeError> {
+impl<NS: Deref> ReadableArgs<(Option<PublicKey>, &NS)> for InboundOnionPayload where NS::Target: NodeSigner {
+       fn read<R: Read>(r: &mut R, args: (Option<PublicKey>, &NS)) -> Result<Self, DecodeError> {
+               let (update_add_blinding_point, node_signer) = args;
+
                let mut amt = None;
                let mut cltv_value = None;
                let mut short_id: Option<u64> = None;
@@ -2337,9 +2365,14 @@ impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: Node
                });
 
                if amt.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
+               if intro_node_blinding_point.is_some() && update_add_blinding_point.is_some() {
+                       return Err(DecodeError::InvalidValue)
+               }
 
-               if let Some(blinding_point) = intro_node_blinding_point {
-                       if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() {
+               if let Some(blinding_point) = intro_node_blinding_point.or(update_add_blinding_point) {
+                       if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() ||
+                               keysend_preimage.is_some()
+                       {
                                return Err(DecodeError::InvalidValue)
                        }
                        let enc_tlvs = encrypted_tlvs_opt.ok_or(DecodeError::InvalidValue)?.0;
@@ -2349,7 +2382,23 @@ impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: Node
                        let mut s = Cursor::new(&enc_tlvs);
                        let mut reader = FixedLengthReader::new(&mut s, enc_tlvs.len() as u64);
                        match ChaChaPolyReadAdapter::read(&mut reader, rho)? {
-                               ChaChaPolyReadAdapter { readable: ReceiveTlvs { payment_secret, payment_constraints }} => {
+                               ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Forward(ForwardTlvs {
+                                       short_channel_id, payment_relay, payment_constraints, features
+                               })} => {
+                                       if amt.is_some() || cltv_value.is_some() || total_msat.is_some() {
+                                               return Err(DecodeError::InvalidValue)
+                                       }
+                                       Ok(Self::BlindedForward {
+                                               short_channel_id,
+                                               payment_relay,
+                                               payment_constraints,
+                                               features,
+                                               intro_node_blinding_point: intro_node_blinding_point.ok_or(DecodeError::InvalidValue)?,
+                                       })
+                               },
+                               ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(ReceiveTlvs {
+                                       payment_secret, payment_constraints
+                               })} => {
                                        if total_msat.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
                                        Ok(Self::BlindedReceive {
                                                amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
@@ -2357,7 +2406,7 @@ impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: Node
                                                outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
                                                payment_secret,
                                                payment_constraints,
-                                               intro_node_blinding_point: blinding_point,
+                                               intro_node_blinding_point,
                                        })
                                },
                        }
@@ -3756,6 +3805,7 @@ mod tests {
                        cltv_expiry: 821716,
                        onion_routing_packet,
                        skimmed_fee_msat: None,
+                       blinding_point: None,
                };
                let encoded_value = update_add_htlc.encode();
                let target_value = <Vec<u8>>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f0101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101
01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap();
@@ -3953,7 +4003,7 @@ mod tests {
                assert_eq!(encoded_value, target_value);
 
                let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
-               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
                if let msgs::InboundOnionPayload::Forward {
                        short_channel_id, amt_to_forward, outgoing_cltv_value
                } = inbound_msg {
@@ -3978,7 +4028,7 @@ mod tests {
                assert_eq!(encoded_value, target_value);
 
                let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
-               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
                if let msgs::InboundOnionPayload::Receive {
                        payment_data: None, amt_msat, outgoing_cltv_value, ..
                } = inbound_msg {
@@ -4006,7 +4056,7 @@ mod tests {
                assert_eq!(encoded_value, target_value);
 
                let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
-               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
                if let msgs::InboundOnionPayload::Receive {
                        payment_data: Some(FinalOnionHopData {
                                payment_secret,
@@ -4042,7 +4092,7 @@ mod tests {
                };
                let encoded_value = msg.encode();
                let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
-               assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).is_err());
+               assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..]), (None, &&node_signer)).is_err());
                let good_type_range_tlvs = vec![
                        ((1 << 16) - 3, vec![42]),
                        ((1 << 16) - 1, vec![42; 32]),
@@ -4051,7 +4101,7 @@ mod tests {
                        *custom_tlvs = good_type_range_tlvs.clone();
                }
                let encoded_value = msg.encode();
-               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).unwrap();
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&encoded_value[..]), (None, &&node_signer)).unwrap();
                match inbound_msg {
                        msgs::InboundOnionPayload::Receive { custom_tlvs, .. } => assert!(custom_tlvs.is_empty()),
                        _ => panic!(),
@@ -4076,7 +4126,7 @@ mod tests {
                let target_value = <Vec<u8>>::from_hex("2e02080badf00d010203040404ffffffffff0000000146c6616b021234ff0000000146c6616f084242424242424242").unwrap();
                assert_eq!(encoded_value, target_value);
                let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
-               let inbound_msg: msgs::InboundOnionPayload = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+               let inbound_msg: msgs::InboundOnionPayload = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
                if let msgs::InboundOnionPayload::Receive {
                        payment_data: None,
                        payment_metadata: None,
@@ -4241,8 +4291,8 @@ mod tests {
                let mut rd = Cursor::new(&big_payload[..]);
 
                let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
-               <msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestKeysInterface>>
-                       ::read(&mut rd, &&node_signer).unwrap();
+               <msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestKeysInterface)>>
+                       ::read(&mut rd, (None, &&node_signer)).unwrap();
        }
        // see above test, needs to be a separate method for use of the serialization macros.
        fn encode_big_payload() -> Result<Vec<u8>, io::Error> {
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
new file mode 100644 (file)
index 0000000..5966dce
--- /dev/null
@@ -0,0 +1,646 @@
+//! Utilities to decode payment onions and do contextless validation of incoming payments.
+//!
+//! Primarily features [`peel_payment_onion`], which allows the decoding of an onion statelessly
+//! and can be used to predict whether we'd accept a payment.
+
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::hmac::{Hmac, HmacEngine};
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1};
+
+use crate::blinded_path;
+use crate::blinded_path::payment::{PaymentConstraints, PaymentRelay};
+use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::ln::PaymentHash;
+use crate::ln::channelmanager::{BlindedForward, CLTV_FAR_FAR_AWAY, HTLCFailureMsg, MIN_CLTV_EXPIRY_DELTA, PendingHTLCInfo, PendingHTLCRouting};
+use crate::ln::features::BlindedHopFeatures;
+use crate::ln::msgs;
+use crate::ln::onion_utils;
+use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
+use crate::sign::{NodeSigner, Recipient};
+use crate::util::logger::Logger;
+
+use crate::prelude::*;
+use core::ops::Deref;
+
+/// Invalid inbound onion payment.
+#[derive(Debug)]
+pub struct InboundOnionErr {
+       /// BOLT 4 error code.
+       pub err_code: u16,
+       /// Data attached to this error.
+       pub err_data: Vec<u8>,
+       /// Error message text.
+       pub msg: &'static str,
+}
+
+fn check_blinded_payment_constraints(
+       amt_msat: u64, cltv_expiry: u32, constraints: &PaymentConstraints
+) -> Result<(), ()> {
+       if amt_msat < constraints.htlc_minimum_msat ||
+               cltv_expiry > constraints.max_cltv_expiry
+       { return Err(()) }
+       Ok(())
+}
+
+fn check_blinded_forward(
+       inbound_amt_msat: u64, inbound_cltv_expiry: u32, payment_relay: &PaymentRelay,
+       payment_constraints: &PaymentConstraints, features: &BlindedHopFeatures
+) -> Result<(u64, u32), ()> {
+       let amt_to_forward = blinded_path::payment::amt_to_forward_msat(
+               inbound_amt_msat, payment_relay
+       ).ok_or(())?;
+       let outgoing_cltv_value = inbound_cltv_expiry.checked_sub(
+               payment_relay.cltv_expiry_delta as u32
+       ).ok_or(())?;
+       check_blinded_payment_constraints(inbound_amt_msat, outgoing_cltv_value, payment_constraints)?;
+
+       if features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()) }
+       Ok((amt_to_forward, outgoing_cltv_value))
+}
+
+pub(super) fn create_fwd_pending_htlc_info(
+       msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32],
+       new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32],
+       next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+) -> Result<PendingHTLCInfo, InboundOnionErr> {
+       debug_assert!(next_packet_pubkey_opt.is_some());
+       let outgoing_packet = msgs::OnionPacket {
+               version: 0,
+               public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
+               hop_data: new_packet_bytes,
+               hmac: hop_hmac,
+       };
+
+       let (
+               short_channel_id, amt_to_forward, outgoing_cltv_value, inbound_blinding_point
+       ) = match hop_data {
+               msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
+                       (short_channel_id, amt_to_forward, outgoing_cltv_value, None),
+               msgs::InboundOnionPayload::BlindedForward {
+                       short_channel_id, payment_relay, payment_constraints, intro_node_blinding_point, features,
+               } => {
+                       let (amt_to_forward, outgoing_cltv_value) = check_blinded_forward(
+                               msg.amount_msat, msg.cltv_expiry, &payment_relay, &payment_constraints, &features
+                       ).map_err(|()| {
+                               // We should be returning malformed here if `msg.blinding_point` is set, but this is
+                               // unreachable right now since we checked it in `decode_update_add_htlc_onion`.
+                               InboundOnionErr {
+                                       msg: "Underflow calculating outbound amount or cltv value for blinded forward",
+                                       err_code: INVALID_ONION_BLINDING,
+                                       err_data: vec![0; 32],
+                               }
+                       })?;
+                       (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(intro_node_blinding_point))
+               },
+               msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
+                       return Err(InboundOnionErr {
+                               msg: "Final Node OnionHopData provided for us as an intermediary node",
+                               err_code: 0x4000 | 22,
+                               err_data: Vec::new(),
+                       }),
+       };
+
+       Ok(PendingHTLCInfo {
+               routing: PendingHTLCRouting::Forward {
+                       onion_packet: outgoing_packet,
+                       short_channel_id,
+                       blinded: inbound_blinding_point.map(|bp| BlindedForward { inbound_blinding_point: bp }),
+               },
+               payment_hash: msg.payment_hash,
+               incoming_shared_secret: shared_secret,
+               incoming_amt_msat: Some(msg.amount_msat),
+               outgoing_amt_msat: amt_to_forward,
+               outgoing_cltv_value,
+               skimmed_fee_msat: None,
+       })
+}
+
+pub(super) fn create_recv_pending_htlc_info(
+       hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash,
+       amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
+       counterparty_skimmed_fee_msat: Option<u64>, current_height: u32, accept_mpp_keysend: bool,
+) -> Result<PendingHTLCInfo, InboundOnionErr> {
+       let (
+               payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value,
+               payment_metadata, requires_blinded_error
+       ) = match hop_data {
+               msgs::InboundOnionPayload::Receive {
+                       payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
+               } =>
+                       (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata,
+                        false),
+               msgs::InboundOnionPayload::BlindedReceive {
+                       amt_msat, total_msat, outgoing_cltv_value, payment_secret, intro_node_blinding_point,
+                       payment_constraints, ..
+               } => {
+                       check_blinded_payment_constraints(amt_msat, cltv_expiry, &payment_constraints)
+                               .map_err(|()| {
+                                       InboundOnionErr {
+                                               err_code: INVALID_ONION_BLINDING,
+                                               err_data: vec![0; 32],
+                                               msg: "Amount or cltv_expiry violated blinded payment constraints",
+                                       }
+                               })?;
+                       let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
+                       (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None,
+                        intro_node_blinding_point.is_none())
+               }
+               msgs::InboundOnionPayload::Forward { .. } => {
+                       return Err(InboundOnionErr {
+                               err_code: 0x4000|22,
+                               err_data: Vec::new(),
+                               msg: "Got non final data with an HMAC of 0",
+                       })
+               },
+               msgs::InboundOnionPayload::BlindedForward { .. } => {
+                       return Err(InboundOnionErr {
+                               err_code: INVALID_ONION_BLINDING,
+                               err_data: vec![0; 32],
+                               msg: "Got blinded non final data with an HMAC of 0",
+                       })
+               }
+       };
+       // final_incorrect_cltv_expiry
+       if outgoing_cltv_value > cltv_expiry {
+               return Err(InboundOnionErr {
+                       msg: "Upstream node set CLTV to less than the CLTV set by the sender",
+                       err_code: 18,
+                       err_data: cltv_expiry.to_be_bytes().to_vec()
+               })
+       }
+       // final_expiry_too_soon
+       // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
+       // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+       //
+       // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
+       // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
+       // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
+       if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 {
+               let mut err_data = Vec::with_capacity(12);
+               err_data.extend_from_slice(&amt_msat.to_be_bytes());
+               err_data.extend_from_slice(&current_height.to_be_bytes());
+               return Err(InboundOnionErr {
+                       err_code: 0x4000 | 15, err_data,
+                       msg: "The final CLTV expiry is too soon to handle",
+               });
+       }
+       if (!allow_underpay && onion_amt_msat > amt_msat) ||
+               (allow_underpay && onion_amt_msat >
+                amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
+       {
+               return Err(InboundOnionErr {
+                       err_code: 19,
+                       err_data: amt_msat.to_be_bytes().to_vec(),
+                       msg: "Upstream node sent less than we were supposed to receive in payment",
+               });
+       }
+
+       let routing = if let Some(payment_preimage) = keysend_preimage {
+               // We need to check that the sender knows the keysend preimage before processing this
+               // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
+               // could discover the final destination of X, by probing the adjacent nodes on the route
+               // with a keysend payment of identical payment hash to X and observing the processing
+               // time discrepancies due to a hash collision with X.
+               let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
+               if hashed_preimage != payment_hash {
+                       return Err(InboundOnionErr {
+                               err_code: 0x4000|22,
+                               err_data: Vec::new(),
+                               msg: "Payment preimage didn't match payment hash",
+                       });
+               }
+               if !accept_mpp_keysend && payment_data.is_some() {
+                       return Err(InboundOnionErr {
+                               err_code: 0x4000|22,
+                               err_data: Vec::new(),
+                               msg: "We don't support MPP keysend payments",
+                       });
+               }
+               PendingHTLCRouting::ReceiveKeysend {
+                       payment_data,
+                       payment_preimage,
+                       payment_metadata,
+                       incoming_cltv_expiry: outgoing_cltv_value,
+                       custom_tlvs,
+               }
+       } else if let Some(data) = payment_data {
+               PendingHTLCRouting::Receive {
+                       payment_data: data,
+                       payment_metadata,
+                       incoming_cltv_expiry: outgoing_cltv_value,
+                       phantom_shared_secret,
+                       custom_tlvs,
+                       requires_blinded_error,
+               }
+       } else {
+               return Err(InboundOnionErr {
+                       err_code: 0x4000|0x2000|3,
+                       err_data: Vec::new(),
+                       msg: "We require payment_secrets",
+               });
+       };
+       Ok(PendingHTLCInfo {
+               routing,
+               payment_hash,
+               incoming_shared_secret: shared_secret,
+               incoming_amt_msat: Some(amt_msat),
+               outgoing_amt_msat: onion_amt_msat,
+               outgoing_cltv_value,
+               skimmed_fee_msat: counterparty_skimmed_fee_msat,
+       })
+}
+
+/// Peel one layer off an incoming onion, returning a [`PendingHTLCInfo`] that contains information
+/// about the intended next-hop for the HTLC.
+///
+/// This does all the relevant context-free checks that LDK requires for payment relay or
+/// acceptance. If the payment is to be received, and the amount matches the expected amount for
+/// a given invoice, this indicates the [`msgs::UpdateAddHTLC`], once fully committed in the
+/// channel, will generate an [`Event::PaymentClaimable`].
+///
+/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
+pub fn peel_payment_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
+       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
+       cur_height: u32, accept_mpp_keysend: bool, allow_skimmed_fees: bool,
+) -> Result<PendingHTLCInfo, InboundOnionErr>
+where
+       NS::Target: NodeSigner,
+       L::Target: Logger,
+{
+       let (hop, shared_secret, next_packet_details_opt) =
+               decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx
+       ).map_err(|e| {
+               let (err_code, err_data) = match e {
+                       HTLCFailureMsg::Malformed(m) => (m.failure_code, Vec::new()),
+                       HTLCFailureMsg::Relay(r) => (0x4000 | 22, r.reason.data),
+               };
+               let msg = "Failed to decode update add htlc onion";
+               InboundOnionErr { msg, err_code, err_data }
+       })?;
+       Ok(match hop {
+               onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
+                       let NextPacketDetails {
+                               next_packet_pubkey, outgoing_amt_msat: _, outgoing_scid: _, outgoing_cltv_value
+                       } = match next_packet_details_opt {
+                               Some(next_packet_details) => next_packet_details,
+                               // Forward should always include the next hop details
+                               None => return Err(InboundOnionErr {
+                                       msg: "Failed to decode update add htlc onion",
+                                       err_code: 0x4000 | 22,
+                                       err_data: Vec::new(),
+                               }),
+                       };
+
+                       if let Err((err_msg, code)) = check_incoming_htlc_cltv(
+                               cur_height, outgoing_cltv_value, msg.cltv_expiry
+                       ) {
+                               return Err(InboundOnionErr {
+                                       msg: err_msg,
+                                       err_code: code,
+                                       err_data: Vec::new(),
+                               });
+                       }
+
+                       // TODO: If this is potentially a phantom payment we should decode the phantom payment
+                       // onion here and check it.
+
+                       create_fwd_pending_htlc_info(
+                               msg, next_hop_data, next_hop_hmac, new_packet_bytes, shared_secret,
+                               Some(next_packet_pubkey)
+                       )?
+               },
+               onion_utils::Hop::Receive(received_data) => {
+                       create_recv_pending_htlc_info(
+                               received_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry,
+                               None, allow_skimmed_fees, msg.skimmed_fee_msat, cur_height, accept_mpp_keysend,
+                       )?
+               }
+       })
+}
+
+pub(super) struct NextPacketDetails {
+       pub(super) next_packet_pubkey: Result<PublicKey, secp256k1::Error>,
+       pub(super) outgoing_scid: u64,
+       pub(super) outgoing_amt_msat: u64,
+       pub(super) outgoing_cltv_value: u32,
+}
+
+pub(super) fn decode_incoming_update_add_htlc_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
+       msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
+) -> Result<(onion_utils::Hop, [u8; 32], Option<NextPacketDetails>), HTLCFailureMsg>
+where
+       NS::Target: NodeSigner,
+       L::Target: Logger,
+{
+       macro_rules! return_malformed_err {
+               ($msg: expr, $err_code: expr) => {
+                       {
+                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                               let (sha256_of_onion, failure_code) = if msg.blinding_point.is_some() {
+                                       ([0; 32], INVALID_ONION_BLINDING)
+                               } else {
+                                       (Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(), $err_code)
+                               };
+                               return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                       channel_id: msg.channel_id,
+                                       htlc_id: msg.htlc_id,
+                                       sha256_of_onion,
+                                       failure_code,
+                               }));
+                       }
+               }
+       }
+
+       if let Err(_) = msg.onion_routing_packet.public_key {
+               return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
+       }
+
+       let blinded_node_id_tweak = msg.blinding_point.map(|bp| {
+               let blinded_tlvs_ss = node_signer.ecdh(Recipient::Node, &bp, None).unwrap().secret_bytes();
+               let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
+               hmac.input(blinded_tlvs_ss.as_ref());
+               Scalar::from_be_bytes(Hmac::from_engine(hmac).to_byte_array()).unwrap()
+       });
+       let shared_secret = node_signer.ecdh(
+               Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), blinded_node_id_tweak.as_ref()
+       ).unwrap().secret_bytes();
+
+       if msg.onion_routing_packet.version != 0 {
+               //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
+               //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
+               //the hash doesn't really serve any purpose - in the case of hashing all data, the
+               //receiving node would have to brute force to figure out which version was put in the
+               //packet by the node that send us the message, in the case of hashing the hop_data, the
+               //node knows the HMAC matched, so they already know what is there...
+               return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
+       }
+       macro_rules! return_err {
+               ($msg: expr, $err_code: expr, $data: expr) => {
+                       {
+                               if msg.blinding_point.is_some() {
+                                       return_malformed_err!($msg, INVALID_ONION_BLINDING)
+                               }
+
+                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                               return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                       channel_id: msg.channel_id,
+                                       htlc_id: msg.htlc_id,
+                                       reason: HTLCFailReason::reason($err_code, $data.to_vec())
+                                               .get_encrypted_failure_packet(&shared_secret, &None),
+                               }));
+                       }
+               }
+       }
+
+       let next_hop = match onion_utils::decode_next_payment_hop(
+               shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
+               msg.payment_hash, msg.blinding_point, node_signer
+       ) {
+               Ok(res) => res,
+               Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
+                       return_malformed_err!(err_msg, err_code);
+               },
+               Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
+                       return_err!(err_msg, err_code, &[0; 0]);
+               },
+       };
+
+       let next_packet_details = match next_hop {
+               onion_utils::Hop::Forward {
+                       next_hop_data: msgs::InboundOnionPayload::Forward {
+                               short_channel_id, amt_to_forward, outgoing_cltv_value
+                       }, ..
+               } => {
+                       let next_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx,
+                               msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
+                       NextPacketDetails {
+                               next_packet_pubkey, outgoing_scid: short_channel_id,
+                               outgoing_amt_msat: amt_to_forward, outgoing_cltv_value
+                       }
+               },
+               onion_utils::Hop::Forward {
+                       next_hop_data: msgs::InboundOnionPayload::BlindedForward {
+                               short_channel_id, ref payment_relay, ref payment_constraints, ref features, ..
+                       }, ..
+               } => {
+                       let (amt_to_forward, outgoing_cltv_value) = match check_blinded_forward(
+                               msg.amount_msat, msg.cltv_expiry, &payment_relay, &payment_constraints, &features
+                       ) {
+                               Ok((amt, cltv)) => (amt, cltv),
+                               Err(()) => {
+                                       return_err!("Underflow calculating outbound amount or cltv value for blinded forward",
+                                               INVALID_ONION_BLINDING, &[0; 32]);
+                               }
+                       };
+                       let next_packet_pubkey = onion_utils::next_hop_pubkey(&secp_ctx,
+                               msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
+                       NextPacketDetails {
+                               next_packet_pubkey, outgoing_scid: short_channel_id, outgoing_amt_msat: amt_to_forward,
+                               outgoing_cltv_value
+                       }
+               },
+               onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
+               onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
+                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
+               {
+                       return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+               }
+       };
+
+       Ok((next_hop, shared_secret, Some(next_packet_details)))
+}
+
+pub(super) fn check_incoming_htlc_cltv(
+       cur_height: u32, outgoing_cltv_value: u32, cltv_expiry: u32
+) -> Result<(), (&'static str, u16)> {
+       if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+               return Err((
+                       "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+                       0x1000 | 13, // incorrect_cltv_expiry
+               ));
+       }
+       // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+       // but we want to be robust wrt to counterparty packet sanitization (see
+       // HTLC_FAIL_BACK_BUFFER rationale).
+       if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
+               return Err(("CLTV expiry is too close", 0x1000 | 14));
+       }
+       if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+               return Err(("CLTV expiry is too far in the future", 21));
+       }
+       // If the HTLC expires ~now, don't bother trying to forward it to our
+       // counterparty. They should fail it anyway, but we don't want to bother with
+       // the round-trips or risk them deciding they definitely want the HTLC and
+       // force-closing to ensure they get it if we're offline.
+       // We previously had a much more aggressive check here which tried to ensure
+       // our counterparty receives an HTLC which has *our* risk threshold met on it,
+       // but there is no need to do that, and since we're a bit conservative with our
+       // risk threshold it just results in failing to forward payments.
+       if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+               return Err(("Outgoing CLTV value is too soon", 0x1000 | 14));
+       }
+
+       Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+       use bitcoin::hashes::Hash;
+       use bitcoin::hashes::sha256::Hash as Sha256;
+       use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+       use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use crate::ln::ChannelId;
+       use crate::ln::channelmanager::RecipientOnionFields;
+       use crate::ln::features::{ChannelFeatures, NodeFeatures};
+       use crate::ln::msgs;
+       use crate::ln::onion_utils::create_payment_onion;
+       use crate::routing::router::{Path, RouteHop};
+       use crate::util::test_utils;
+
+       #[test]
+       fn fail_construct_onion_on_too_big_payloads() {
+               // Ensure that if we call `construct_onion_packet` and friends where payloads are too large for
+               // the allotted packet length, we'll fail to construct. Previously, senders would happily
+               // construct invalid packets by array-shifting the final node's HMAC out of the packet when
+               // adding an intermediate onion layer, causing the receiver to error with "final payload
+               // provided for us as an intermediate node."
+               let secp_ctx = Secp256k1::new();
+               let bob = crate::sign::KeysManager::new(&[2; 32], 42, 42);
+               let bob_pk = PublicKey::from_secret_key(&secp_ctx, &bob.get_node_secret_key());
+               let charlie = crate::sign::KeysManager::new(&[3; 32], 42, 42);
+               let charlie_pk = PublicKey::from_secret_key(&secp_ctx, &charlie.get_node_secret_key());
+
+               let (
+                       session_priv, total_amt_msat, cur_height, mut recipient_onion, keysend_preimage, payment_hash,
+                       prng_seed, hops, ..
+               ) = payment_onion_args(bob_pk, charlie_pk);
+
+               // Ensure the onion will not fit all the payloads by adding a large custom TLV.
+               recipient_onion.custom_tlvs.push((13377331, vec![0; 1156]));
+
+               let path = Path { hops, blinded_tail: None, };
+               let onion_keys = super::onion_utils::construct_onion_keys(&secp_ctx, &path, &session_priv).unwrap();
+               let (onion_payloads, ..) = super::onion_utils::build_onion_payloads(
+                       &path, total_amt_msat, recipient_onion, cur_height + 1, &Some(keysend_preimage)
+               ).unwrap();
+
+               assert!(super::onion_utils::construct_onion_packet(
+                               onion_payloads, onion_keys, prng_seed, &payment_hash
+               ).is_err());
+       }
+
+       #[test]
+       fn test_peel_payment_onion() {
+               use super::*;
+               let secp_ctx = Secp256k1::new();
+
+               let bob = crate::sign::KeysManager::new(&[2; 32], 42, 42);
+               let bob_pk = PublicKey::from_secret_key(&secp_ctx, &bob.get_node_secret_key());
+               let charlie = crate::sign::KeysManager::new(&[3; 32], 42, 42);
+               let charlie_pk = PublicKey::from_secret_key(&secp_ctx, &charlie.get_node_secret_key());
+
+               let (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
+                       prng_seed, hops, recipient_amount, pay_secret) = payment_onion_args(bob_pk, charlie_pk);
+
+               let path = Path {
+                       hops: hops,
+                       blinded_tail: None,
+               };
+
+               let (onion, amount_msat, cltv_expiry) = create_payment_onion(
+                       &secp_ctx, &path, &session_priv, total_amt_msat, recipient_onion, cur_height,
+                       &payment_hash, &Some(preimage), prng_seed
+               ).unwrap();
+
+               let msg = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, onion);
+               let logger = test_utils::TestLogger::with_id("bob".to_string());
+
+               let peeled = peel_payment_onion(&msg, &&bob, &&logger, &secp_ctx, cur_height, true, false)
+                       .map_err(|e| e.msg).unwrap();
+
+               let next_onion = match peeled.routing {
+                       PendingHTLCRouting::Forward { onion_packet, .. } => {
+                               onion_packet
+                       },
+                       _ => panic!("expected a forwarded onion"),
+               };
+
+               let msg2 = make_update_add_msg(amount_msat, cltv_expiry, payment_hash, next_onion);
+               let peeled2 = peel_payment_onion(&msg2, &&charlie, &&logger, &secp_ctx, cur_height, true, false)
+                       .map_err(|e| e.msg).unwrap();
+
+               match peeled2.routing {
+                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_data, incoming_cltv_expiry, .. } => {
+                               assert_eq!(payment_preimage, preimage);
+                               assert_eq!(peeled2.outgoing_amt_msat, recipient_amount);
+                               assert_eq!(incoming_cltv_expiry, peeled2.outgoing_cltv_value);
+                               let msgs::FinalOnionHopData{total_msat, payment_secret} = payment_data.unwrap();
+                               assert_eq!(total_msat, total_amt_msat);
+                               assert_eq!(payment_secret, pay_secret);
+                       },
+                       _ => panic!("expected a received keysend"),
+               };
+       }
+
+       fn make_update_add_msg(
+               amount_msat: u64, cltv_expiry: u32, payment_hash: PaymentHash,
+               onion_routing_packet: msgs::OnionPacket
+       ) -> msgs::UpdateAddHTLC {
+               msgs::UpdateAddHTLC {
+                       channel_id: ChannelId::from_bytes([0; 32]),
+                       htlc_id: 0,
+                       amount_msat,
+                       cltv_expiry,
+                       payment_hash,
+                       onion_routing_packet,
+                       skimmed_fee_msat: None,
+                       blinding_point: None,
+               }
+       }
+
+       fn payment_onion_args(hop_pk: PublicKey, recipient_pk: PublicKey) -> (
+               SecretKey, u64, u32, RecipientOnionFields, PaymentPreimage, PaymentHash, [u8; 32],
+               Vec<RouteHop>, u64, PaymentSecret,
+       ) {
+               let session_priv_bytes = [42; 32];
+               let session_priv = SecretKey::from_slice(&session_priv_bytes).unwrap();
+               let total_amt_msat = 1000;
+               let cur_height = 1000;
+               let pay_secret = PaymentSecret([99; 32]);
+               let recipient_onion = RecipientOnionFields::secret_only(pay_secret);
+               let preimage_bytes = [43; 32];
+               let preimage = PaymentPreimage(preimage_bytes);
+               let rhash_bytes = Sha256::hash(&preimage_bytes).to_byte_array();
+               let payment_hash = PaymentHash(rhash_bytes);
+               let prng_seed = [44; 32];
+
+               // make a route alice -> bob -> charlie
+               let hop_fee = 1;
+               let recipient_amount = total_amt_msat - hop_fee;
+               let hops = vec![
+                       RouteHop {
+                               pubkey: hop_pk,
+                               fee_msat: hop_fee,
+                               cltv_expiry_delta: 42,
+                               short_channel_id: 1,
+                               node_features: NodeFeatures::empty(),
+                               channel_features: ChannelFeatures::empty(),
+                               maybe_announced_channel: false,
+                       },
+                       RouteHop {
+                               pubkey: recipient_pk,
+                               fee_msat: recipient_amount,
+                               cltv_expiry_delta: 42,
+                               short_channel_id: 2,
+                               node_features: NodeFeatures::empty(),
+                               channel_features: ChannelFeatures::empty(),
+                               maybe_announced_channel: false,
+                       }
+               ];
+
+               (session_priv, total_amt_msat, cur_height, recipient_onion, preimage, payment_hash,
+                       prng_seed, hops, recipient_amount, pay_secret)
+       }
+
+}
index 31f2f7827bcb57c42d7b2a625a842abdaa00ab17..99e3e965a9a9aab167928198db79b82054c98066 100644 (file)
@@ -242,6 +242,8 @@ pub(super) fn build_onion_payloads(path: &Path, total_msat: u64, mut recipient_o
 /// the hops can be of variable length.
 pub(crate) const ONION_DATA_LEN: usize = 20*65;
 
+pub(super) const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;
+
 #[inline]
 fn shift_slice_right(arr: &mut [u8], amt: usize) {
        for i in (amt..arr.len()).rev() {
@@ -321,8 +323,6 @@ fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
 
                let mut pos = 0;
                for (i, (payload, keys)) in payloads.iter().zip(onion_keys.iter()).enumerate() {
-                       if i == payloads.len() - 1 { break; }
-
                        let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
                        for _ in 0..(packet_data.len() - pos) { // TODO: Batch this.
                                let mut dummy = [0; 1];
@@ -336,6 +336,8 @@ fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
                                return Err(());
                        }
 
+                       if i == payloads.len() - 1 { break; }
+
                        res.resize(pos, 0u8);
                        chacha.process_in_place(&mut res);
                }
@@ -433,11 +435,22 @@ pub(crate) struct DecodedOnionFailure {
        pub(crate) onion_error_data: Option<Vec<u8>>,
 }
 
+/// Note that we always decrypt `packet` in-place here even if the deserialization into
+/// [`msgs::DecodedOnionErrorPacket`] ultimately fails.
+fn decrypt_onion_error_packet(
+       packet: &mut Vec<u8>, shared_secret: SharedSecret
+) -> Result<msgs::DecodedOnionErrorPacket, msgs::DecodeError> {
+       let ammag = gen_ammag_from_shared_secret(shared_secret.as_ref());
+       let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
+       chacha.process_in_place(packet);
+       msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(packet))
+}
+
 /// Process failure we got back from upstream on a payment we sent (implying htlc_source is an
 /// OutboundRoute).
 #[inline]
 pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
-       secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>
+       secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource, mut encrypted_packet: Vec<u8>
 ) -> DecodedOnionFailure where L::Target: Logger {
        let (path, session_priv, first_hop_htlc_msat) = if let &HTLCSource::OutboundRoute {
                ref path, ref session_priv, ref first_hop_htlc_msat, ..
@@ -491,8 +504,21 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
                                Some(hop) => hop,
                                None => {
                                        // The failing hop is within a multi-hop blinded path.
-                                       error_code_ret = Some(BADONION | PERM | 24); // invalid_onion_blinding
-                                       error_packet_ret = Some(vec![0; 32]);
+                                       #[cfg(not(test))] {
+                                               error_code_ret = Some(BADONION | PERM | 24); // invalid_onion_blinding
+                                               error_packet_ret = Some(vec![0; 32]);
+                                       }
+                                       #[cfg(test)] {
+                                               // Actually parse the onion error data in tests so we can check that blinded hops fail
+                                               // back correctly.
+                                               let err_packet = decrypt_onion_error_packet(
+                                                       &mut encrypted_packet, shared_secret
+                                               ).unwrap();
+                                               error_code_ret =
+                                                       Some(u16::from_be_bytes(err_packet.failuremsg.get(0..2).unwrap().try_into().unwrap()));
+                                               error_packet_ret = Some(err_packet.failuremsg[2..].to_vec());
+                                       }
+
                                        res = Some(FailureLearnings {
                                                network_update: None, short_channel_id: None, payment_failed_permanently: false
                                        });
@@ -504,15 +530,7 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
                let amt_to_forward = htlc_msat - route_hop.fee_msat;
                htlc_msat = amt_to_forward;
 
-               let ammag = gen_ammag_from_shared_secret(shared_secret.as_ref());
-
-               let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
-               decryption_tmp.resize(packet_decrypted.len(), 0);
-               let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
-               chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
-               packet_decrypted = decryption_tmp;
-
-               let err_packet = match msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
+               let err_packet = match decrypt_onion_error_packet(&mut encrypted_packet, shared_secret) {
                        Ok(p) => p,
                        Err(_) => return
                };
@@ -722,9 +740,11 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
 }
 
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(PartialEq))]
 pub(super) struct HTLCFailReason(HTLCFailReasonRepr);
 
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(PartialEq))]
 enum HTLCFailReasonRepr {
        LightningError {
                err: msgs::OnionErrorPacket,
@@ -920,9 +940,11 @@ pub(crate) enum OnionDecodeErr {
 
 pub(crate) fn decode_next_payment_hop<NS: Deref>(
        shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash,
-       node_signer: &NS,
+       blinding_point: Option<PublicKey>, node_signer: &NS,
 ) -> Result<Hop, OnionDecodeErr> where NS::Target: NodeSigner {
-       match decode_next_hop(shared_secret, hop_data, hmac_bytes, Some(payment_hash), node_signer) {
+       match decode_next_hop(
+               shared_secret, hop_data, hmac_bytes, Some(payment_hash), (blinding_point, node_signer)
+       ) {
                Ok((next_hop_data, None)) => Ok(Hop::Receive(next_hop_data)),
                Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => {
                        Ok(Hop::Forward {
@@ -1001,18 +1023,20 @@ fn decode_next_hop<T, R: ReadableArgs<T>, N: NextPacketBytes>(shared_secret: [u8
                        if hmac == [0; 32] {
                                #[cfg(test)]
                                {
-                                       // In tests, make sure that the initial onion packet data is, at least, non-0.
-                                       // We could do some fancy randomness test here, but, ehh, whatever.
-                                       // This checks for the issue where you can calculate the path length given the
-                                       // onion data as all the path entries that the originator sent will be here
-                                       // as-is (and were originally 0s).
-                                       // Of course reverse path calculation is still pretty easy given naive routing
-                                       // algorithms, but this fixes the most-obvious case.
-                                       let mut next_bytes = [0; 32];
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
+                                       if chacha_stream.read.position() < hop_data.len() as u64 - 64 {
+                                               // In tests, make sure that the initial onion packet data is, at least, non-0.
+                                               // We could do some fancy randomness test here, but, ehh, whatever.
+                                               // This checks for the issue where you can calculate the path length given the
+                                               // onion data as all the path entries that the originator sent will be here
+                                               // as-is (and were originally 0s).
+                                               // Of course reverse path calculation is still pretty easy given naive routing
+                                               // algorithms, but this fixes the most-obvious case.
+                                               let mut next_bytes = [0; 32];
+                                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                                               assert_ne!(next_bytes[..], [0; 32][..]);
+                                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                                               assert_ne!(next_bytes[..], [0; 32][..]);
+                                       }
                                }
                                return Ok((msg, None)); // We are the final destination for this packet
                        } else {
index a5654b30f65db4a3d2d89d9b7d37a8420d6f5040..73cdf59bbb699fb6966ad964ac99d6f00f3efeb9 100644 (file)
@@ -19,8 +19,9 @@ use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, Mes
 use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, commit_tx_fee_msat, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI};
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
 use crate::ln::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures};
-use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
+use crate::ln::{msgs, ChannelId, PaymentHash, PaymentSecret, PaymentPreimage};
 use crate::ln::msgs::ChannelMessageHandler;
+use crate::ln::onion_utils;
 use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
 use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
@@ -31,17 +32,20 @@ use crate::util::errors::APIError;
 use crate::util::ser::Writeable;
 use crate::util::string::UntrustedString;
 
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::{Secp256k1, SecretKey};
 
 use crate::prelude::*;
 
+use crate::ln::functional_test_utils;
 use crate::ln::functional_test_utils::*;
 use crate::routing::gossip::NodeId;
 #[cfg(feature = "std")]
-use {
-       crate::util::time::tests::SinceEpoch,
-       std::time::{SystemTime, Instant, Duration}
-};
+use std::time::{SystemTime, Instant, Duration};
+#[cfg(not(feature = "no-std"))]
+use crate::util::time::tests::SinceEpoch;
 
 #[test]
 fn mpp_failure() {
@@ -633,7 +637,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let nodes_0_deserialized;
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
        let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 
        // Serialize the ChannelManager prior to sending payments
@@ -745,14 +749,21 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 
        mine_transaction(&nodes[1], &as_commitment_tx);
-       let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(bs_htlc_claim_txn.len(), 1);
-       check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
+       let bs_htlc_claim_txn = {
+               let mut txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
+               assert_eq!(txn.len(), 2);
+               check_spends!(txn[0], funding_tx);
+               check_spends!(txn[1], as_commitment_tx);
+               txn.pop().unwrap()
+       };
 
        if !confirm_before_reload {
                mine_transaction(&nodes[0], &as_commitment_tx);
+               let txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               assert_eq!(txn[0].txid(), as_commitment_tx.txid());
        }
-       mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
+       mine_transaction(&nodes[0], &bs_htlc_claim_txn);
        expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
        let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
@@ -762,7 +773,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        };
        check_spends!(first_htlc_timeout_tx, as_commitment_tx);
        check_spends!(second_htlc_timeout_tx, as_commitment_tx);
-       if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
+       if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output {
                confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
        } else {
                confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
@@ -914,19 +925,23 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        // the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
        // after the commitment transaction, so always connect the commitment transaction.
        mine_transaction(&nodes[0], &bs_commitment_tx[0]);
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
+               let _ = nodes[0].tx_broadcaster.txn_broadcast();
+       }
        mine_transaction(&nodes[1], &bs_commitment_tx[0]);
        if !use_dust {
                connect_blocks(&nodes[0], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
                connect_blocks(&nodes[1], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
                let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-               check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
                assert_eq!(as_htlc_timeout.len(), 1);
+               check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
 
                mine_transaction(&nodes[0], &as_htlc_timeout[0]);
-               // nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
-               nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
                mine_transaction(&nodes[1], &as_htlc_timeout[0]);
        }
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
+               let _ = nodes[0].tx_broadcaster.txn_broadcast();
+       }
 
        // Create a new channel on which to retry the payment before we fail the payment via the
        // HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
@@ -1044,32 +1059,36 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 
        // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(node_txn.len(), 3);
-       assert_eq!(node_txn[0].txid(), node_txn[1].txid());
-       check_spends!(node_txn[1], funding_tx);
-       check_spends!(node_txn[2], node_txn[1]);
-       let timeout_txn = vec![node_txn[2].clone()];
+       let (commitment_tx, htlc_timeout_tx) = {
+               let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+               assert_eq!(txn.len(), 2);
+               check_spends!(txn[0], funding_tx);
+               check_spends!(txn[1], txn[0]);
+               (txn.remove(0), txn.remove(0))
+       };
 
        nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
-       connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[1].clone()]));
+       mine_transaction(&nodes[1], &commitment_tx);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
-       let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(claim_txn.len(), 1);
-       check_spends!(claim_txn[0], node_txn[1]);
+       let htlc_success_tx = {
+               let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               check_spends!(txn[0], commitment_tx);
+               txn.pop().unwrap()
+       };
 
-       connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[1].clone()]));
+       mine_transaction(&nodes[0], &commitment_tx);
 
        if confirm_commitment_tx {
                connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
        }
 
-       let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] });
+       let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] });
 
        if payment_timeout {
                assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
@@ -3334,6 +3353,7 @@ fn test_threaded_payment_retries() {
                // We really want std::thread::scope, but its not stable until 1.63. Until then, we get unsafe.
                let node_ref = NodePtr::from_node(&nodes[0]);
                move || {
+                       let _ = &node_ref;
                        let node_a = unsafe { &*node_ref.0 };
                        while Instant::now() < end_time {
                                node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
@@ -4194,3 +4214,59 @@ fn  test_htlc_forward_considers_anchor_outputs_value() {
        check_closed_broadcast(&nodes[2], 1, true);
        check_added_monitors(&nodes[2], 1);
 }
+
+#[test]
+fn peel_payment_onion_custom_tlvs() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let secp_ctx = Secp256k1::new();
+
+       let amt_msat = 1000;
+       let payment_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(),
+               TEST_FINAL_CLTV, false);
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
+       let mut recipient_onion = RecipientOnionFields::spontaneous_empty()
+               .with_custom_tlvs(vec![(414141, vec![42; 1200])]).unwrap();
+       let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
+       let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted");
+       let keysend_preimage = PaymentPreimage([42; 32]);
+       let payment_hash = PaymentHash(Sha256::hash(&keysend_preimage.0).to_byte_array());
+
+       let (onion_routing_packet, first_hop_msat, cltv_expiry) = onion_utils::create_payment_onion(
+               &secp_ctx, &route.paths[0], &session_priv, amt_msat, recipient_onion.clone(),
+               nodes[0].best_block_info().1, &payment_hash, &Some(keysend_preimage), prng_seed
+       ).unwrap();
+
+       let update_add = msgs::UpdateAddHTLC {
+               channel_id: ChannelId([0; 32]),
+               htlc_id: 42,
+               amount_msat: first_hop_msat,
+               payment_hash,
+               cltv_expiry,
+               skimmed_fee_msat: None,
+               onion_routing_packet,
+               blinding_point: None,
+       };
+       let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
+               &update_add, &&chanmon_cfgs[1].keys_manager, &&chanmon_cfgs[1].logger, &secp_ctx,
+               nodes[1].best_block_info().1, true, false
+       ).unwrap();
+       assert_eq!(peeled_onion.incoming_amt_msat, Some(amt_msat));
+       match peeled_onion.routing {
+               PendingHTLCRouting::ReceiveKeysend {
+                       payment_data, payment_metadata, custom_tlvs, ..
+               } => {
+                       #[cfg(not(c_bindings))]
+                       assert_eq!(&custom_tlvs, recipient_onion.custom_tlvs());
+                       #[cfg(c_bindings)]
+                       assert_eq!(custom_tlvs, recipient_onion.custom_tlvs());
+                       assert!(payment_metadata.is_none());
+                       assert!(payment_data.is_none());
+               },
+               _ => panic!()
+       }
+}
index 3949b97e0d03019a5f79ad4f293d0d450a9d970a..9f49b67932bc02c5972f5b7b1351a933df15eacb 100644 (file)
@@ -18,8 +18,8 @@
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
-use crate::sign::{KeysManager, NodeSigner, Recipient};
-use crate::events::{MessageSendEvent, MessageSendEventsProvider};
+use crate::sign::{NodeSigner, Recipient};
+use crate::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
@@ -33,20 +33,27 @@ use crate::ln::wire::{Encode, Type};
 #[cfg(not(c_bindings))]
 use crate::onion_message::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
 use crate::onion_message::{CustomOnionMessageHandler, OffersMessage, OffersMessageHandler, OnionMessageContents, PendingOnionMessage};
-use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, NodeAlias};
+use crate::routing::gossip::{NodeId, NodeAlias};
 use crate::util::atomic_counter::AtomicCounter;
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, WithContext};
 use crate::util::string::PrintableString;
 
 use crate::prelude::*;
 use crate::io;
 use alloc::collections::VecDeque;
-use crate::sync::{Arc, Mutex, MutexGuard, FairRwLock};
+use crate::sync::{Mutex, MutexGuard, FairRwLock};
 use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
 use core::convert::Infallible;
-#[cfg(feature = "std")] use std::error;
+#[cfg(feature = "std")]
+use std::error;
+#[cfg(not(c_bindings))]
+use {
+       crate::routing::gossip::{NetworkGraph, P2PGossipSync},
+       crate::sign::KeysManager,
+       crate::sync::Arc,
+};
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
@@ -89,6 +96,9 @@ pub trait CustomMessageHandler: wire::CustomMessageReader {
 /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
 /// or doing any processing. You can provide one of these as the route_handler in a MessageHandler.
 pub struct IgnoringMessageHandler{}
+impl EventsProvider for IgnoringMessageHandler {
+       fn process_pending_events<H: Deref>(&self, _handler: H) where H::Target: EventHandler {}
+}
 impl MessageSendEventsProvider for IgnoringMessageHandler {
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Vec::new() }
 }
@@ -115,6 +125,7 @@ impl OnionMessageHandler for IgnoringMessageHandler {
        fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
        fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
+       fn timer_tick_occurred(&self) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
                InitFeatures::empty()
@@ -680,6 +691,8 @@ pub trait APeerManager {
        type NS: Deref<Target=Self::NST>;
        /// Gets a reference to the underlying [`PeerManager`].
        fn as_ref(&self) -> &PeerManager<Self::Descriptor, Self::CM, Self::RM, Self::OM, Self::L, Self::CMH, Self::NS>;
+       /// Returns the peer manager's [`OnionMessageHandler`].
+       fn onion_message_handler(&self) -> &Self::OMT;
 }
 
 impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref>
@@ -705,6 +718,9 @@ APeerManager for PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
        type NST = <NS as Deref>::Target;
        type NS = NS;
        fn as_ref(&self) -> &PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> { self }
+       fn onion_message_handler(&self) -> &Self::OMT {
+               self.message_handler.onion_message_handler.deref()
+       }
 }
 
 /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
@@ -1253,10 +1269,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Append a message to a peer's pending outbound/write buffer
        fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
+               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                if is_gossip_msg(message.type_id()) {
-                       log_gossip!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
+                       log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
                } else {
-                       log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0))
+                       log_trace!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0))
                }
                peer.msgs_sent_since_pong += 1;
                peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(message));
@@ -1285,8 +1302,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                let mut read_pos = 0;
                                while read_pos < data.len() {
                                        macro_rules! try_potential_handleerror {
-                                               ($peer: expr, $thing: expr) => {
-                                                       match $thing {
+                                               ($peer: expr, $thing: expr) => {{
+                                                       let res = $thing;
+                                                       let logger = WithContext::from(&self.logger, peer_node_id.map(|(id, _)| id), None);
+                                                       match res {
                                                                Ok(x) => x,
                                                                Err(e) => {
                                                                        match e.action {
@@ -1296,7 +1315,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                        // re-entrant code and possibly unexpected behavior. The
                                                                                        // message send is optimistic anyway, and in this case
                                                                                        // we immediately disconnect the peer.
-                                                                                       log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        return Err(PeerHandleError { });
                                                                                },
                                                                                msgs::ErrorAction::DisconnectPeerWithWarning { .. } => {
@@ -1305,32 +1324,32 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                        // re-entrant code and possibly unexpected behavior. The
                                                                                        // message send is optimistic anyway, and in this case
                                                                                        // we immediately disconnect the peer.
-                                                                                       log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        return Err(PeerHandleError { });
                                                                                },
                                                                                msgs::ErrorAction::IgnoreAndLog(level) => {
-                                                                                       log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_given_level!(logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        continue
                                                                                },
                                                                                msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
                                                                                msgs::ErrorAction::IgnoreError => {
-                                                                                       log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        continue;
                                                                                },
                                                                                msgs::ErrorAction::SendErrorMessage { msg } => {
-                                                                                       log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        self.enqueue_message($peer, &msg);
                                                                                        continue;
                                                                                },
                                                                                msgs::ErrorAction::SendWarningMessage { msg, log_level } => {
-                                                                                       log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
                                                                                        self.enqueue_message($peer, &msg);
                                                                                        continue;
                                                                                },
                                                                        }
                                                                }
                                                        }
-                                               }
+                                               }}
                                        }
 
                                        let mut peer_lock = peer_mutex.lock().unwrap();
@@ -1355,9 +1374,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                                macro_rules! insert_node_id {
                                                        () => {
+                                                               let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                                                match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
                                                                        hash_map::Entry::Occupied(e) => {
-                                                                               log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
+                                                                               log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
                                                                                peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
                                                                                // Check that the peers map is consistent with the
                                                                                // node_id_to_descriptor map, as this has been broken
@@ -1366,7 +1386,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                return Err(PeerHandleError { })
                                                                        },
                                                                        hash_map::Entry::Vacant(entry) => {
-                                                                               log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
+                                                                               log_debug!(logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
                                                                                entry.insert(peer_descriptor.clone())
                                                                        },
                                                                };
@@ -1434,6 +1454,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peer.pending_read_buffer.resize(18, 0);
                                                                        peer.pending_read_is_header = true;
 
+                                                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                                                        let message = match message_result {
                                                                                Ok(x) => x,
                                                                                Err(e) => {
@@ -1443,16 +1464,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                // the messages enqueued here to not actually
                                                                                                // be sent before the peer is disconnected.
                                                                                                (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => {
-                                                                                                       log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
+                                                                                                       log_gossip!(logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
                                                                                                        continue;
                                                                                                }
                                                                                                (msgs::DecodeError::UnsupportedCompression, _) => {
-                                                                                                       log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
+                                                                                                       log_gossip!(logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() });
                                                                                                        continue;
                                                                                                }
                                                                                                (_, Some(ty)) if is_gossip_msg(ty) => {
-                                                                                                       log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message");
+                                                                                                       log_gossip!(logger, "Got an invalid value while deserializing a gossip message");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage {
                                                                                                                channel_id: ChannelId::new_zero(),
                                                                                                                data: format!("Unreadable/bogus gossip message of type {}", ty),
@@ -1460,16 +1481,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                        continue;
                                                                                                }
                                                                                                (msgs::DecodeError::UnknownRequiredFeature, _) => {
-                                                                                                       log_debug!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
+                                                                                                       log_debug!(logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
                                                                                                        return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
                                                                                                (msgs::DecodeError::InvalidValue, _) => {
-                                                                                                       log_debug!(self.logger, "Got an invalid value while deserializing message");
+                                                                                                       log_debug!(logger, "Got an invalid value while deserializing message");
                                                                                                        return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::ShortRead, _) => {
-                                                                                                       log_debug!(self.logger, "Deserialization failed due to shortness of message");
+                                                                                                       log_debug!(logger, "Deserialization failed due to shortness of message");
                                                                                                        return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
@@ -1519,6 +1540,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                message: wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>
        ) -> Result<Option<wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
                let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages").0;
+               let logger = WithContext::from(&self.logger, Some(their_node_id), None);
                peer_lock.received_message_since_timer_tick = true;
 
                // Need an Init as first message
@@ -1536,7 +1558,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                }
                                        }
                                        if !have_compatible_chains {
-                                               log_debug!(self.logger, "Peer does not support any of our supported chains");
+                                               log_debug!(logger, "Peer does not support any of our supported chains");
                                                return Err(PeerHandleError { }.into());
                                        }
                                }
@@ -1544,12 +1566,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                        let our_features = self.init_features(&their_node_id);
                        if msg.features.requires_unknown_bits_from(&our_features) {
-                               log_debug!(self.logger, "Peer requires features unknown to us");
+                               log_debug!(logger, "Peer requires features unknown to us");
                                return Err(PeerHandleError { }.into());
                        }
 
                        if our_features.requires_unknown_bits_from(&msg.features) {
-                               log_debug!(self.logger, "We require features unknown to our peer");
+                               log_debug!(logger, "We require features unknown to our peer");
                                return Err(PeerHandleError { }.into());
                        }
 
@@ -1557,7 +1579,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                return Err(PeerHandleError { }.into());
                        }
 
-                       log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
+                       log_info!(logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
 
                        // For peers not supporting gossip queries start sync now, otherwise wait until we receive a filter.
                        if msg.features.initial_routing_sync() && !msg.features.supports_gossip_queries() {
@@ -1565,22 +1587,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        }
 
                        if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
-                               log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+                               log_debug!(logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
                        if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
-                               log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+                               log_debug!(logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
                        if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
-                               log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+                               log_debug!(logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
                                return Err(PeerHandleError { }.into());
                        }
 
                        peer_lock.their_features = Some(msg.features);
                        return Ok(None);
                } else if peer_lock.their_features.is_none() {
-                       log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
+                       log_debug!(logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
                        return Err(PeerHandleError { }.into());
                }
 
@@ -1602,9 +1624,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                mem::drop(peer_lock);
 
                if is_gossip_msg(message.type_id()) {
-                       log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+                       log_gossip!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
                } else {
-                       log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+                       log_trace!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
                }
 
                let mut should_forward = None;
@@ -1618,14 +1640,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // Handled above
                        },
                        wire::Message::Error(msg) => {
-                               log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
+                               log_debug!(logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
                                self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
                                if msg.channel_id.is_zero() {
                                        return Err(PeerHandleError { }.into());
                                }
                        },
                        wire::Message::Warning(msg) => {
-                               log_debug!(self.logger, "Got warning message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
+                               log_debug!(logger, "Got warning message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
                        },
 
                        wire::Message::Ping(msg) => {
@@ -1789,11 +1811,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                        // Unknown messages:
                        wire::Message::Unknown(type_id) if message.is_even() => {
-                               log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
+                               log_debug!(logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
                                return Err(PeerHandleError { }.into());
                        },
                        wire::Message::Unknown(type_id) => {
-                               log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
+                               log_trace!(logger, "Received unknown odd message of type {}, ignoring", type_id);
                        },
                        wire::Message::Custom(custom) => {
                                self.message_handler.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
@@ -1810,6 +1832,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
+                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
@@ -1817,7 +1840,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
-                                               log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                                               log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
                                        if let Some((_, their_node_id)) = peer.their_node_id {
@@ -1837,6 +1860,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
+                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
@@ -1844,7 +1868,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
-                                               log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                                               log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
                                        if let Some((_, their_node_id)) = peer.their_node_id {
@@ -1864,6 +1888,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
+                                       let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
                                        if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
@@ -1871,7 +1896,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        debug_assert!(peer.their_node_id.is_some());
                                        debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
-                                               log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
+                                               log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
                                        }
                                        if except_node.is_some() && peer.their_node_id.as_ref().map(|(pk, _)| pk) == except_node {
@@ -1953,31 +1978,31 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                for event in events_generated.drain(..) {
                                        match event {
                                                MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
                                                                        log_pubkey!(node_id),
                                                                        &msg.temporary_channel_id,
                                                                        log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
@@ -1986,103 +2011,107 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendChannelReady event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendStfu { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendStfu event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendSplice { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendSplice event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendSplice event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendSpliceAck { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendSpliceLocked { ref node_id, ref msg} => {
-                                                       log_debug!(self.logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}",
+                                                       let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+                                                       log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxComplete event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAbort event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
-                                                       log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(commitment_signed.channel_id)), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        update_add_htlcs.len(),
                                                                        update_fulfill_htlcs.len(),
@@ -2107,31 +2136,31 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *peer, commitment_signed);
                                                },
                                                MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling Shutdown event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
-                                                       log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
                                                                        &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
-                                                       log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
+                                                       log_debug!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
                                                                        log_pubkey!(node_id),
                                                                        msg.contents.short_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
@@ -2169,18 +2198,19 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        }
                                                },
                                                MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
-                                                       log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+                                                       log_trace!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id), msg.contents.short_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::HandleError { node_id, action } => {
+                                                       let logger = WithContext::from(&self.logger, Some(node_id), None);
                                                        match action {
                                                                msgs::ErrorAction::DisconnectPeer { msg } => {
                                                                        if let Some(msg) = msg.as_ref() {
-                                                                               log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+                                                                               log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                                        log_pubkey!(node_id), msg.data);
                                                                        } else {
-                                                                               log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {}",
+                                                                               log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {}",
                                                                                        log_pubkey!(node_id));
                                                                        }
                                                                        // We do not have the peers write lock, so we just store that we're
@@ -2190,7 +2220,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peers_to_disconnect.insert(node_id, msg);
                                                                },
                                                                msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {
-                                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+                                                                       log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                                log_pubkey!(node_id), msg.data);
                                                                        // We do not have the peers write lock, so we just store that we're
                                                                        // about to disconnect the peer and do it after we finish
@@ -2198,20 +2228,20 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
                                                                },
                                                                msgs::ErrorAction::IgnoreAndLog(level) => {
-                                                                       log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+                                                                       log_given_level!(logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
                                                                },
                                                                msgs::ErrorAction::IgnoreDuplicateGossip => {},
                                                                msgs::ErrorAction::IgnoreError => {
-                                                                               log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+                                                                               log_debug!(logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
                                                                        },
                                                                msgs::ErrorAction::SendErrorMessage { ref msg } => {
-                                                                       log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
+                                                                       log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
                                                                                        log_pubkey!(node_id),
                                                                                        msg.data);
                                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), msg);
                                                                },
                                                                msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
-                                                                       log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
+                                                                       log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
                                                                                        log_pubkey!(node_id),
                                                                                        msg.data);
                                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), msg);
@@ -2225,7 +2255,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
                                                MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
-                                                       log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+                                                       log_gossip!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
                                                                log_pubkey!(node_id),
                                                                msg.short_channel_ids.len(),
                                                                msg.first_blocknum,
@@ -2299,7 +2329,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                debug_assert!(peer.their_node_id.is_some());
                if let Some((node_id, _)) = peer.their_node_id {
-                       log_trace!(self.logger, "Disconnecting peer with id {} due to {}", node_id, reason);
+                       log_trace!(WithContext::from(&self.logger, Some(node_id), None), "Disconnecting peer with id {} due to {}", node_id, reason);
                        self.message_handler.chan_handler.peer_disconnected(&node_id);
                        self.message_handler.onion_message_handler.peer_disconnected(&node_id);
                }
@@ -2318,7 +2348,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        Some(peer_lock) => {
                                let peer = peer_lock.lock().unwrap();
                                if let Some((node_id, _)) = peer.their_node_id {
-                                       log_trace!(self.logger, "Handling disconnection of peer {}", log_pubkey!(node_id));
+                                       log_trace!(WithContext::from(&self.logger, Some(node_id), None), "Handling disconnection of peer {}", log_pubkey!(node_id));
                                        let removed = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
                                        debug_assert!(removed.is_some(), "descriptor maps should be consistent");
                                        if !peer.handshake_complete() { return; }
@@ -2465,7 +2495,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
        const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (SocketAddress::MAX_LEN as u32 + 1) / 2;
-       #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
        // smaller than 100:
index 052e3eb6e5d98eaace5bd39b71061bd0cc3b6b63..1ac290383a48761fa1e892bbc01fc73685aad983 100644 (file)
@@ -23,7 +23,6 @@ use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;
-use crate::util::string::UntrustedString;
 
 use bitcoin::hash_types::BlockHash;
 
@@ -493,7 +492,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        assert!(found_err);
 }
 
-fn do_test_data_loss_protect(reconnect_panicing: bool) {
+#[cfg(feature = "std")]
+fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) {
+       use crate::routing::router::{RouteParameters, PaymentParameters};
+       use crate::ln::channelmanager::Retry;
+       use crate::util::string::UntrustedString;
        // When we get a data_loss_protect proving we're behind, we immediately panic as the
        // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
        // panic message informs the user they should force-close without broadcasting, which is tested
@@ -517,8 +520,38 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        let previous_node_state = nodes[0].node.encode();
        let previous_chain_monitor_state = get_monitor!(nodes[0], chan.2).encode();
 
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       assert!(!substantially_old || !not_stale, "substantially_old and not_stale doesn't make sense");
+       if not_stale || !substantially_old {
+               // Previously, we'd only hit the data_loss_protect assertion if we had a state which
+               // revoked at least two revocations ago, not the latest revocation. Here, we use
+               // `not_stale` to test the boundary condition.
+               let pay_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(), 100, false);
+               let route_params = RouteParameters::from_payment_params_and_value(pay_params, 40000);
+               nodes[0].node.send_spontaneous_payment_with_retry(None, RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), route_params, Retry::Attempts(0)).unwrap();
+               check_added_monitors(&nodes[0], 1);
+               let update_add_commit = SendEvent::from_node(&nodes[0]);
+
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_commit.msgs[0]);
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_add_commit.commitment_msg);
+               check_added_monitors(&nodes[1], 1);
+               let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+               check_added_monitors(&nodes[0], 1);
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+               if !not_stale {
+                       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+                       check_added_monitors(&nodes[0], 1);
+                       // A now revokes their original state, at which point reconnect should panic
+                       let raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+                       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+                       check_added_monitors(&nodes[1], 1);
+                       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               }
+       } else {
+               send_payment(&nodes[0], &[&nodes[1]], 8000000);
+               send_payment(&nodes[0], &[&nodes[1]], 8000000);
+       }
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -535,89 +568,131 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
-               // Check we close channel detecting A is fallen-behind
-               // Check that we sent the warning message when we detected that A has fallen behind,
-               // and give the possibility for A to recover from the warning.
+               // If A has fallen behind substantially, B should send it a message letting it know
+               // that.
                nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-               let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
-               assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+               let reestablish_msg;
+               if substantially_old {
+                       let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction: 0 (received) vs 4 (expected)".to_owned();
+
+                       let warn_reestablish = nodes[1].node.get_and_clear_pending_msg_events();
+                       assert_eq!(warn_reestablish.len(), 2);
+                       match warn_reestablish[1] {
+                               MessageSendEvent::HandleError { action: ErrorAction::SendWarningMessage { ref msg, .. }, .. } => {
+                                       assert_eq!(msg.data, warn_msg);
+                               },
+                               _ => panic!("Unexpected events: {:?}", warn_reestablish),
+                       }
+                       reestablish_msg = match &warn_reestablish[0] {
+                               MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+                               _ => panic!("Unexpected events: {:?}", warn_reestablish),
+                       };
+               } else {
+                       let msgs = nodes[1].node.get_and_clear_pending_msg_events();
+                       assert!(msgs.len() >= 4);
+                       match msgs.last() {
+                               Some(MessageSendEvent::SendChannelUpdate { .. }) => {},
+                               _ => panic!("Unexpected events: {:?}", msgs),
+                       }
+                       assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::SendRevokeAndACK { .. })));
+                       assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::UpdateHTLCs { .. })));
+                       reestablish_msg = match &msgs[0] {
+                               MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+                               _ => panic!("Unexpected events: {:?}", msgs),
+                       };
+               }
 
                {
-                       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-                       // The node B should not broadcast the transaction to force close the channel!
+                       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+                       // The node B should never force-close the channel.
                        assert!(node_txn.is_empty());
                }
 
-               let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                // Check A panics upon seeing proof it has fallen behind.
-               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
-               return; // By this point we should have panic'ed!
-       }
+               let reconnect_res = std::panic::catch_unwind(|| {
+                       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_msg);
+               });
+               if not_stale {
+                       assert!(reconnect_res.is_ok());
+                       // At this point A gets confused because B expects a commitment state newer than A
+                       // has sent, but not a newer revocation secret, so A just (correctly) closes.
+                       check_closed_broadcast(&nodes[0], 1, true);
+                       check_added_monitors(&nodes[0], 1);
+                       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError {
+                               err: "Peer attempted to reestablish channel with a future remote commitment transaction: 2 (received) vs 1 (expected)".to_owned()
+                       }, [nodes[1].node.get_our_node_id()], 1000000);
+               } else {
+                       assert!(reconnect_res.is_err());
+                       // Skip the `Drop` handler for `Node`s as some may be in an invalid (panicked) state.
+                       std::mem::forget(nodes);
+               }
+       } else {
+               assert!(!not_stale, "We only care about the stale case when not testing panicking");
 
-       nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
-       {
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 0);
-       }
+               nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
+               {
+                       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+                       assert_eq!(node_txn.len(), 0);
+               }
+
+               for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+                       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+                       } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+                               match action {
+                                       &ErrorAction::DisconnectPeer { ref msg } => {
+                                               assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+                                       },
+                                       _ => panic!("Unexpected event!"),
+                               }
+                       } else {
+                               panic!("Unexpected event {:?}", msg)
+                       }
+               }
 
-       for msg in nodes[0].node.get_and_clear_pending_msg_events() {
-               if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
-               } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+               // after the warning message sent by B, we should not be able to
+               // use the channel, or reconnect with success to the channel.
+               assert!(nodes[0].node.list_usable_channels().is_empty());
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
+               let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+               let mut err_msgs_0 = Vec::with_capacity(1);
+               if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
                        match action {
-                               &ErrorAction::DisconnectPeer { ref msg } => {
-                                       assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+                               &ErrorAction::SendErrorMessage { ref msg } => {
+                                       assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
+                                       err_msgs_0.push(msg.clone());
                                },
                                _ => panic!("Unexpected event!"),
                        }
                } else {
-                       panic!("Unexpected event {:?}", msg)
+                       panic!("Unexpected event!");
                }
+               assert_eq!(err_msgs_0.len(), 1);
+               nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+               assert!(nodes[1].node.list_usable_channels().is_empty());
+               check_added_monitors!(nodes[1], 1);
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
+                       , [nodes[0].node.get_our_node_id()], 1000000);
+               check_closed_broadcast!(nodes[1], false);
        }
-
-       // after the warning message sent by B, we should not able to
-       // use the channel, or reconnect with success to the channel.
-       assert!(nodes[0].node.list_usable_channels().is_empty());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
-               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
-       }, true).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
-       }, false).unwrap();
-       let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
-
-       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
-       let mut err_msgs_0 = Vec::with_capacity(1);
-       if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
-               match action {
-                       &ErrorAction::SendErrorMessage { ref msg } => {
-                               assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
-                               err_msgs_0.push(msg.clone());
-                       },
-                       _ => panic!("Unexpected event!"),
-               }
-       } else {
-               panic!("Unexpected event!");
-       }
-       assert_eq!(err_msgs_0.len(), 1);
-       nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
-       assert!(nodes[1].node.list_usable_channels().is_empty());
-       check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
-               , [nodes[0].node.get_our_node_id()], 1000000);
-       check_closed_broadcast!(nodes[1], false);
-}
-
-#[test]
-#[should_panic]
-fn test_data_loss_protect_showing_stale_state_panics() {
-       do_test_data_loss_protect(true);
 }
 
 #[test]
-fn test_force_close_without_broadcast() {
-       do_test_data_loss_protect(false);
+#[cfg(feature = "std")]
+fn test_data_loss_protect() {
+       do_test_data_loss_protect(true, false, true);
+       do_test_data_loss_protect(true, true, false);
+       do_test_data_loss_protect(true, false, false);
+       do_test_data_loss_protect(false, true, false);
+       do_test_data_loss_protect(false, false, false);
 }
 
 #[test]
@@ -991,9 +1066,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
                        confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
                } else {
                        connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
-                       let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-                       assert_eq!(bs_htlc_timeout_tx.len(), 1);
-                       confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+                       let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+                       assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+                       let bs_htlc_timeout_tx = txn.pop().unwrap();
+                       confirm_transaction(&nodes[1], &bs_htlc_timeout_tx);
                }
        } else {
                confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
index badb78f245a6651be0abffc415565142e4c7bcf5..cce012aa99203edf4f5b327f22d9cc754226a62e 100644 (file)
@@ -666,6 +666,9 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor
 
        mine_transaction(&nodes[0], &commitment_tx_b);
        mine_transaction(&nodes[1], &commitment_tx_b);
+       if nodes[1].connect_style.borrow().updates_best_block_first() {
+               let _ = nodes[1].tx_broadcaster.txn_broadcast();
+       }
 
        // Provide the preimage now, such that we only claim from the holder commitment (since it's
        // currently confirmed) and not the counterparty's.
@@ -756,3 +759,122 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa
        // commitment (still unrevoked) is the currently confirmed closing transaction.
        assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
 }
+
+fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_counterparty_commitment: bool) {
+       // Tests that a node will retry broadcasting its own commitment after seeing a confirmed
+       // counterparty commitment be reorged out.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       if revoked_counterparty_commitment {
+               chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+       }
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut config = test_default_channel_config();
+       if anchors {
+               config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+               config.manually_accept_inbound_channels = true;
+       }
+       let persister;
+       let new_chain_monitor;
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+       let nodes_1_deserialized;
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+       // Route a payment so we have an HTLC to claim as well.
+       let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+       if revoked_counterparty_commitment {
+               // Trigger a fee update such that we advance the state. We will have B broadcast its state
+               // without the fee update.
+               let serialized_node = nodes[1].node.encode();
+               let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
+
+               *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() += 1;
+               nodes[0].node.timer_tick_occurred();
+               check_added_monitors!(nodes[0], 1);
+
+               let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
+               commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);
+
+               reload_node!(
+                       nodes[1], config, &serialized_node, &[&serialized_monitor], persister, new_chain_monitor, nodes_1_deserialized
+               );
+       }
+
+       // Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A.
+       connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+       check_closed_broadcast(&nodes[0], 1, true);
+       check_added_monitors(&nodes[0], 1);
+       check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+
+       {
+               let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+               if anchors {
+                       assert_eq!(txn.len(), 1);
+                       let commitment_tx_a = txn.pop().unwrap();
+                       check_spends!(commitment_tx_a, funding_tx);
+               } else {
+                       assert_eq!(txn.len(), 2);
+                       let htlc_tx_a = txn.pop().unwrap();
+                       let commitment_tx_a = txn.pop().unwrap();
+                       check_spends!(commitment_tx_a, funding_tx);
+                       check_spends!(htlc_tx_a, commitment_tx_a);
+               }
+       };
+
+       // B will also broadcast its own commitment.
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+       check_closed_broadcast(&nodes[1], 1, true);
+       check_added_monitors(&nodes[1], 1);
+       check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
+
+       let commitment_b = {
+               let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               let tx = txn.pop().unwrap();
+               check_spends!(tx, funding_tx);
+               tx
+       };
+
+       // Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B.
+       mine_transaction(&nodes[0], &commitment_b);
+       {
+               let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+               if nodes[0].connect_style.borrow().updates_best_block_first() {
+                       // `commitment_a` and `htlc_timeout_a` are rebroadcast because the best block was
+                       // updated prior to seeing `commitment_b`.
+                       assert_eq!(txn.len(), if anchors { 2 } else { 3 });
+                       check_spends!(txn.last().unwrap(), commitment_b);
+               } else {
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], commitment_b);
+               }
+       }
+
+       // Disconnect the block, allowing A to retry its own commitment. Note that we connect two
+       // blocks, one to get us back to the original height, and another to retry our pending claims.
+       disconnect_blocks(&nodes[0], 1);
+       connect_blocks(&nodes[0], 2);
+       {
+               let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+               if anchors {
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], funding_tx);
+               } else {
+                       assert_eq!(txn.len(), 2);
+                       check_spends!(txn[0], txn[1]); // HTLC timeout A
+                       check_spends!(txn[1], funding_tx); // Commitment A
+                       assert_ne!(txn[1].txid(), commitment_b.txid());
+               }
+       }
+}
+
+#[test]
+fn test_retries_own_commitment_broadcast_after_reorg() {
+       do_test_retries_own_commitment_broadcast_after_reorg(false, false);
+       do_test_retries_own_commitment_broadcast_after_reorg(false, true);
+       do_test_retries_own_commitment_broadcast_after_reorg(true, false);
+       do_test_retries_own_commitment_broadcast_after_reorg(true, true);
+}
index bc7c013771fb0e049d23283d6fce6f533d1d4ae1..bdd32e90d0ace1cca8b1fd46c4eac99ecb486b61 100644 (file)
@@ -13,10 +13,11 @@ use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::ChannelMonitorUpdateStatus;
 use crate::chain::transaction::OutPoint;
 use crate::events::{MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason};
-use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, ChannelShutdownState, ChannelDetails};
+use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry, ChannelShutdownState, ChannelDetails};
 use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
+use crate::ln::onion_utils::INVALID_ONION_BLINDING;
 use crate::ln::script::ShutdownScript;
 use crate::util::test_utils;
 use crate::util::test_utils::OnGetShutdownScriptpubkey;
@@ -276,6 +277,21 @@ fn shutdown_on_unfunded_channel() {
        check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyCoopClosedUnfundedChannel, [nodes[1].node.get_our_node_id()], 1_000_000);
 }
 
+#[test]
+fn close_on_unfunded_channel() {
+       // Test the user asking us to close prior to funding generation
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 100_000, 0, None, None).unwrap();
+       let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000);
+}
+
 #[test]
 fn expect_channel_shutdown_state_with_force_closure() {
        // Test sending a shutdown prior to channel_ready after funding generation
@@ -401,6 +417,11 @@ fn updates_shutdown_wait() {
 
 #[test]
 fn htlc_fail_async_shutdown() {
+       do_htlc_fail_async_shutdown(true);
+       do_htlc_fail_async_shutdown(false);
+}
+
+fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
        // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
@@ -409,9 +430,20 @@ fn htlc_fail_async_shutdown() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
-       nodes[0].node.send_payment_with_route(&route, our_payment_hash,
-               RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+       let amt_msat = 100000;
+       let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+       let route_params = if blinded_recipient {
+               crate::ln::blinded_payment_tests::get_blinded_route_parameters(
+                       amt_msat, our_payment_secret,
+                       nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_2.0.contents],
+                       &chanmon_cfgs[2].keys_manager)
+       } else {
+               RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV), amt_msat)
+       };
+       nodes[0].node.send_payment(our_payment_hash,
+               RecipientOnionFields::secret_only(our_payment_secret),
+               PaymentId(our_payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
        check_added_monitors!(nodes[0], 1);
        let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        assert_eq!(updates.update_add_htlcs.len(), 1);
@@ -441,7 +473,12 @@ fn htlc_fail_async_shutdown() {
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
 
-       expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
+       if blinded_recipient {
+               expect_payment_failed_conditions(&nodes[0], our_payment_hash, false,
+                       PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+       } else {
+               expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
+       }
 
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 1);
index 02d68b635363356736b727287f9cf6949c02be02..bb29c76164e1aa7422c5dddda07b8cbd3e7ac01f 100644 (file)
@@ -720,8 +720,7 @@ impl Bolt12Invoice {
                self.contents.verify(TlvStream::new(&self.bytes), key, secp_ctx)
        }
 
-       #[cfg(test)]
-       pub(super) fn as_tlv_stream(&self) -> FullInvoiceTlvStreamRef {
+       pub(crate) fn as_tlv_stream(&self) -> FullInvoiceTlvStreamRef {
                let (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream, invoice_tlv_stream) =
                        self.contents.as_tlv_stream();
                let signature_tlv_stream = SignatureTlvStreamRef {
@@ -1143,7 +1142,6 @@ impl_writeable!(FallbackAddress, { version, program });
 type FullInvoiceTlvStream =
        (PayerTlvStream, OfferTlvStream, InvoiceRequestTlvStream, InvoiceTlvStream, SignatureTlvStream);
 
-#[cfg(test)]
 type FullInvoiceTlvStreamRef<'a> = (
        PayerTlvStreamRef<'a>,
        OfferTlvStreamRef<'a>,
index 9ddd741a1d5428745e2bd1d840fa14eb4a1eee08..4dd85b352f708de598c60c89e91a3b1896ec81a4 100644 (file)
@@ -608,8 +608,7 @@ impl InvoiceRequest {
                })
        }
 
-       #[cfg(test)]
-       fn as_tlv_stream(&self) -> FullInvoiceRequestTlvStreamRef {
+       pub(crate) fn as_tlv_stream(&self) -> FullInvoiceRequestTlvStreamRef {
                let (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream) =
                        self.contents.as_tlv_stream();
                let signature_tlv_stream = SignatureTlvStreamRef {
@@ -811,7 +810,6 @@ tlv_stream!(InvoiceRequestTlvStream, InvoiceRequestTlvStreamRef, INVOICE_REQUEST
 type FullInvoiceRequestTlvStream =
        (PayerTlvStream, OfferTlvStream, InvoiceRequestTlvStream, SignatureTlvStream);
 
-#[cfg(test)]
 type FullInvoiceRequestTlvStreamRef<'a> = (
        PayerTlvStreamRef<'a>,
        OfferTlvStreamRef<'a>,
index 32a0fa3eb26fa3bdf5cff00509a81af819f3be23..b0031a6c3f4e05cd41823ceb9fa785ab3fb91722 100644 (file)
 //! Onion message testing and test utilities live here.
 
 use crate::blinded_path::BlindedPath;
+use crate::events::{Event, EventsProvider};
 use crate::ln::features::InitFeatures;
-use crate::ln::msgs::{self, DecodeError, OnionMessageHandler};
-use crate::sign::{NodeSigner, Recipient};
+use crate::ln::msgs::{self, DecodeError, OnionMessageHandler, SocketAddress};
+use crate::sign::{EntropySource, NodeSigner, Recipient};
 use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
 use crate::util::test_utils;
 use super::{CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessageContents, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
 
 use bitcoin::network::constants::Network;
 use bitcoin::hashes::hex::FromHex;
-use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey, self};
 
 use crate::io;
 use crate::io_extras::read_to_end;
@@ -28,10 +29,11 @@ use crate::sync::{Arc, Mutex};
 use crate::prelude::*;
 
 struct MessengerNode {
-       keys_manager: Arc<test_utils::TestKeysInterface>,
+       node_id: PublicKey,
+       entropy_source: Arc<test_utils::TestKeysInterface>,
        messenger: OnionMessenger<
                Arc<test_utils::TestKeysInterface>,
-               Arc<test_utils::TestKeysInterface>,
+               Arc<test_utils::TestNodeSigner>,
                Arc<test_utils::TestLogger>,
                Arc<TestMessageRouter>,
                Arc<TestOffersMessageHandler>,
@@ -40,12 +42,6 @@ struct MessengerNode {
        custom_message_handler: Arc<TestCustomMessageHandler>,
 }
 
-impl MessengerNode {
-       fn get_node_pk(&self) -> PublicKey {
-               self.keys_manager.get_node_id(Recipient::Node).unwrap()
-       }
-}
-
 struct TestMessageRouter {}
 
 impl MessageRouter for TestMessageRouter {
@@ -55,8 +51,19 @@ impl MessageRouter for TestMessageRouter {
                Ok(OnionMessagePath {
                        intermediate_nodes: vec![],
                        destination,
+                       first_node_addresses:
+                               Some(vec![SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }]),
                })
        }
+
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+               _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()> {
+               unreachable!()
+       }
 }
 
 struct TestOffersMessageHandler {}
@@ -155,44 +162,69 @@ impl CustomOnionMessageHandler for TestCustomMessageHandler {
 }
 
 fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
+       let secrets = (1..=num_messengers)
+               .into_iter()
+               .map(|i| SecretKey::from_slice(&[i; 32]).unwrap())
+               .collect();
+       create_nodes_using_secrets(secrets)
+}
+
+fn create_nodes_using_secrets(secrets: Vec<SecretKey>) -> Vec<MessengerNode> {
        let mut nodes = Vec::new();
-       for i in 0..num_messengers {
+       for (i, secret_key) in secrets.into_iter().enumerate() {
                let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
                let seed = [i as u8; 32];
-               let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
+               let entropy_source = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
+               let node_signer = Arc::new(test_utils::TestNodeSigner::new(secret_key));
+
                let message_router = Arc::new(TestMessageRouter {});
                let offers_message_handler = Arc::new(TestOffersMessageHandler {});
                let custom_message_handler = Arc::new(TestCustomMessageHandler::new());
                nodes.push(MessengerNode {
-                       keys_manager: keys_manager.clone(),
+                       node_id: node_signer.get_node_id(Recipient::Node).unwrap(),
+                       entropy_source: entropy_source.clone(),
                        messenger: OnionMessenger::new(
-                               keys_manager.clone(), keys_manager, logger.clone(), message_router,
+                               entropy_source, node_signer, logger.clone(), message_router,
                                offers_message_handler, custom_message_handler.clone()
                        ),
                        custom_message_handler,
                });
        }
-       for idx in 0..num_messengers - 1 {
-               let i = idx as usize;
-               let mut features = InitFeatures::empty();
-               features.set_onion_messages_optional();
-               let init_msg = msgs::Init { features, networks: None, remote_network_address: None };
-               nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone(), true).unwrap();
-               nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone(), false).unwrap();
+       for i in 0..nodes.len() - 1 {
+               connect_peers(&nodes[i], &nodes[i + 1]);
        }
        nodes
 }
 
+fn connect_peers(node_a: &MessengerNode, node_b: &MessengerNode) {
+       let mut features = InitFeatures::empty();
+       features.set_onion_messages_optional();
+       let init_msg = msgs::Init { features, networks: None, remote_network_address: None };
+       node_a.messenger.peer_connected(&node_b.node_id, &init_msg.clone(), true).unwrap();
+       node_b.messenger.peer_connected(&node_a.node_id, &init_msg.clone(), false).unwrap();
+}
+
+fn disconnect_peers(node_a: &MessengerNode, node_b: &MessengerNode) {
+       node_a.messenger.peer_disconnected(&node_b.node_id);
+       node_b.messenger.peer_disconnected(&node_a.node_id);
+}
+
+fn release_events(node: &MessengerNode) -> Vec<Event> {
+       let events = core::cell::RefCell::new(Vec::new());
+       node.messenger.process_pending_events(&|e| events.borrow_mut().push(e));
+       events.into_inner()
+}
+
 fn pass_along_path(path: &Vec<MessengerNode>) {
        let mut prev_node = &path[0];
        for node in path.into_iter().skip(1) {
                let events = prev_node.messenger.release_pending_msgs();
                let onion_msg =  {
-                       let msgs = events.get(&node.get_node_pk()).unwrap();
+                       let msgs = events.get(&node.node_id).unwrap();
                        assert_eq!(msgs.len(), 1);
                        msgs[0].clone()
                };
-               node.messenger.handle_onion_message(&prev_node.get_node_pk(), &onion_msg);
+               node.messenger.handle_onion_message(&prev_node.node_id, &onion_msg);
                prev_node = node;
        }
 }
@@ -204,9 +236,10 @@ fn one_unblinded_hop() {
 
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
-               destination: Destination::Node(nodes[1].get_node_pk()),
+               destination: Destination::Node(nodes[1].node_id),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -217,10 +250,11 @@ fn two_unblinded_hops() {
        let test_msg = TestCustomMessage::Response;
 
        let path = OnionMessagePath {
-               intermediate_nodes: vec![nodes[1].get_node_pk()],
-               destination: Destination::Node(nodes[2].get_node_pk()),
+               intermediate_nodes: vec![nodes[1].node_id],
+               destination: Destination::Node(nodes[2].node_id),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -231,12 +265,13 @@ fn one_blinded_hop() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk()], &*nodes[1].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -247,13 +282,14 @@ fn two_unblinded_two_blinded() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[3].get_node_pk(), nodes[4].get_node_pk()], &*nodes[4].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[3].node_id, nodes[4].node_id], &*nodes[4].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
-               intermediate_nodes: vec![nodes[1].get_node_pk(), nodes[2].get_node_pk()],
+               intermediate_nodes: vec![nodes[1].node_id, nodes[2].node_id],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
 
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[4].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -264,13 +300,14 @@ fn three_blinded_hops() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
 
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[3].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
@@ -281,13 +318,14 @@ fn too_big_packet_error() {
        let nodes = create_nodes(2);
        let test_msg = TestCustomMessage::Response;
 
-       let hop_node_id = nodes[1].get_node_pk();
+       let hop_node_id = nodes[1].node_id;
        let hops = vec![hop_node_id; 400];
        let path = OnionMessagePath {
                intermediate_nodes: hops,
                destination: Destination::Node(hop_node_id),
+               first_node_addresses: None,
        };
-       let err = nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
        assert_eq!(err, SendError::TooBigPacket);
 }
 
@@ -299,23 +337,25 @@ fn we_are_intro_node() {
        let test_msg = TestCustomMessage::Response;
 
        let secp_ctx = Secp256k1::new();
-       let blinded_path = BlindedPath::new_for_message(&[nodes[0].get_node_pk(), nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
 
-       nodes[0].messenger.send_onion_message(path, test_msg.clone(), None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap();
        nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 
        // Try with a two-hop blinded path where we are the introduction node.
-       let blinded_path = BlindedPath::new_for_message(&[nodes[0].get_node_pk(), nodes[1].get_node_pk()], &*nodes[1].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
        nodes.remove(2);
        pass_along_path(&nodes);
@@ -329,13 +369,14 @@ fn invalid_blinded_path_error() {
 
        // 0 hops
        let secp_ctx = Secp256k1::new();
-       let mut blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
+       let mut blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
        blinded_path.blinded_hops.clear();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       let err = nodes[0].messenger.send_onion_message(path, test_msg.clone(), None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap_err();
        assert_eq!(err, SendError::TooFewBlindedHops);
 }
 
@@ -347,11 +388,12 @@ fn reply_path() {
 
        // Destination::Node
        let path = OnionMessagePath {
-               intermediate_nodes: vec![nodes[1].get_node_pk(), nodes[2].get_node_pk()],
-               destination: Destination::Node(nodes[3].get_node_pk()),
+               intermediate_nodes: vec![nodes[1].node_id, nodes[2].node_id],
+               destination: Destination::Node(nodes[3].node_id),
+               first_node_addresses: None,
        };
-       let reply_path = BlindedPath::new_for_message(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
-       nodes[0].messenger.send_onion_message(path, test_msg.clone(), Some(reply_path)).unwrap();
+       let reply_path = BlindedPath::new_for_message(&[nodes[2].node_id, nodes[1].node_id, nodes[0].node_id], &*nodes[0].entropy_source, &secp_ctx).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), Some(reply_path)).unwrap();
        nodes[3].custom_message_handler.expect_message(TestCustomMessage::Request);
        pass_along_path(&nodes);
        // Make sure the last node successfully decoded the reply path.
@@ -360,14 +402,15 @@ fn reply_path() {
        pass_along_path(&nodes);
 
        // Destination::BlindedPath
-       let blinded_path = BlindedPath::new_for_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+       let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
                destination: Destination::BlindedPath(blinded_path),
+               first_node_addresses: None,
        };
-       let reply_path = BlindedPath::new_for_message(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
+       let reply_path = BlindedPath::new_for_message(&[nodes[2].node_id, nodes[1].node_id, nodes[0].node_id], &*nodes[0].entropy_source, &secp_ctx).unwrap();
 
-       nodes[0].messenger.send_onion_message(path, test_msg, Some(reply_path)).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, Some(reply_path)).unwrap();
        nodes[3].custom_message_handler.expect_message(TestCustomMessage::Request);
        pass_along_path(&nodes);
 
@@ -381,6 +424,7 @@ fn reply_path() {
 fn invalid_custom_message_type() {
        let nodes = create_nodes(2);
 
+       #[derive(Debug)]
        struct InvalidCustomMessage{}
        impl OnionMessageContents for InvalidCustomMessage {
                fn tlv_type(&self) -> u64 {
@@ -396,9 +440,10 @@ fn invalid_custom_message_type() {
        let test_msg = InvalidCustomMessage {};
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
-               destination: Destination::Node(nodes[1].get_node_pk()),
+               destination: Destination::Node(nodes[1].node_id),
+               first_node_addresses: None,
        };
-       let err = nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
        assert_eq!(err, SendError::InvalidMessage);
 }
 
@@ -408,12 +453,13 @@ fn peer_buffer_full() {
        let test_msg = TestCustomMessage::Request;
        let path = OnionMessagePath {
                intermediate_nodes: vec![],
-               destination: Destination::Node(nodes[1].get_node_pk()),
+               destination: Destination::Node(nodes[1].node_id),
+               first_node_addresses: None,
        };
        for _ in 0..188 { // Based on MAX_PER_PEER_BUFFER_SIZE in OnionMessenger
-               nodes[0].messenger.send_onion_message(path.clone(), test_msg.clone(), None).unwrap();
+               nodes[0].messenger.send_onion_message_using_path(path.clone(), test_msg.clone(), None).unwrap();
        }
-       let err = nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap_err();
+       let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
        assert_eq!(err, SendError::BufferFull);
 }
 
@@ -427,51 +473,97 @@ fn many_hops() {
 
        let mut intermediate_nodes = vec![];
        for i in 1..(num_nodes-1) {
-               intermediate_nodes.push(nodes[i].get_node_pk());
+               intermediate_nodes.push(nodes[i].node_id);
        }
 
        let path = OnionMessagePath {
                intermediate_nodes,
-               destination: Destination::Node(nodes[num_nodes-1].get_node_pk()),
+               destination: Destination::Node(nodes[num_nodes-1].node_id),
+               first_node_addresses: None,
        };
-       nodes[0].messenger.send_onion_message(path, test_msg, None).unwrap();
+       nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
        nodes[num_nodes-1].custom_message_handler.expect_message(TestCustomMessage::Response);
        pass_along_path(&nodes);
 }
 
 #[test]
-fn spec_test_vector() {
-       let keys_mgrs = vec![
-               (Arc::new(test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet)), // Alice
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4141414141414141414141414141414141414141414141414141414141414141").unwrap()).unwrap()))),
-               (Arc::new(test_utils::TestKeysInterface::new(&[1; 32], Network::Testnet)), // Bob
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4242424242424242424242424242424242424242424242424242424242424242").unwrap()).unwrap()))),
-               (Arc::new(test_utils::TestKeysInterface::new(&[2; 32], Network::Testnet)), // Carol
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4343434343434343434343434343434343434343434343434343434343434343").unwrap()).unwrap()))),
-               (Arc::new(test_utils::TestKeysInterface::new(&[3; 32], Network::Testnet)), // Dave
-                Arc::new(test_utils::TestNodeSigner::new(SecretKey::from_slice(&<Vec<u8>>::from_hex("4444444444444444444444444444444444444444444444444444444444444444").unwrap()).unwrap()))),
-       ];
-       let message_router = Arc::new(TestMessageRouter {});
-       let offers_message_handler = Arc::new(TestOffersMessageHandler {});
-       let custom_message_handler = Arc::new(TestCustomMessageHandler::new());
-       let mut nodes = Vec::new();
-       for (idx, (entropy_source, node_signer)) in keys_mgrs.iter().enumerate() {
-               let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", idx)));
-               nodes.push(OnionMessenger::new(
-                       entropy_source.clone(), node_signer.clone(), logger.clone(), message_router.clone(),
-                       offers_message_handler.clone(), custom_message_handler.clone()
-               ));
+fn requests_peer_connection_for_buffered_messages() {
+       let nodes = create_nodes(3);
+       let message = TestCustomMessage::Request;
+       let secp_ctx = Secp256k1::new();
+       let blinded_path = BlindedPath::new_for_message(
+               &[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
+       ).unwrap();
+       let destination = Destination::BlindedPath(blinded_path);
+
+       // Buffer an onion message for a connected peer
+       nodes[0].messenger.send_onion_message(message.clone(), destination.clone(), None).unwrap();
+       assert!(release_events(&nodes[0]).is_empty());
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_some());
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+
+       // Buffer an onion message for a disconnected peer
+       disconnect_peers(&nodes[0], &nodes[1]);
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+       nodes[0].messenger.send_onion_message(message, destination, None).unwrap();
+
+       // Check that a ConnectionNeeded event for the peer is provided
+       let events = release_events(&nodes[0]);
+       assert_eq!(events.len(), 1);
+       match &events[0] {
+               Event::ConnectionNeeded { node_id, .. } => assert_eq!(*node_id, nodes[1].node_id),
+               e => panic!("Unexpected event: {:?}", e),
        }
-       for idx in 0..nodes.len() - 1 {
-               let i = idx as usize;
-               let mut features = InitFeatures::empty();
-               features.set_onion_messages_optional();
-               let init_msg = msgs::Init { features, networks: None, remote_network_address: None };
-               nodes[i].peer_connected(
-                       &keys_mgrs[i + 1].1.get_node_id(Recipient::Node).unwrap(), &init_msg.clone(), true).unwrap();
-               nodes[i + 1].peer_connected(
-                       &keys_mgrs[i].1.get_node_id(Recipient::Node).unwrap(), &init_msg.clone(), false).unwrap();
+
+       // Release the buffered onion message when reconnected
+       connect_peers(&nodes[0], &nodes[1]);
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_some());
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+}
+
+#[test]
+fn drops_buffered_messages_waiting_for_peer_connection() {
+       let nodes = create_nodes(3);
+       let message = TestCustomMessage::Request;
+       let secp_ctx = Secp256k1::new();
+       let blinded_path = BlindedPath::new_for_message(
+               &[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
+       ).unwrap();
+       let destination = Destination::BlindedPath(blinded_path);
+
+       // Buffer an onion message for a disconnected peer
+       disconnect_peers(&nodes[0], &nodes[1]);
+       nodes[0].messenger.send_onion_message(message, destination, None).unwrap();
+
+       // Release the event so the timer can start ticking
+       let events = release_events(&nodes[0]);
+       assert_eq!(events.len(), 1);
+       match &events[0] {
+               Event::ConnectionNeeded { node_id, .. } => assert_eq!(*node_id, nodes[1].node_id),
+               e => panic!("Unexpected event: {:?}", e),
+       }
+
+       // Drop buffered messages for a disconnected peer after some timer ticks
+       use crate::onion_message::messenger::MAX_TIMER_TICKS;
+       for _ in 0..=MAX_TIMER_TICKS {
+               nodes[0].messenger.timer_tick_occurred();
        }
+       connect_peers(&nodes[0], &nodes[1]);
+       assert!(nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).is_none());
+}
+
+#[test]
+fn spec_test_vector() {
+       let secret_keys = [
+               "4141414141414141414141414141414141414141414141414141414141414141", // Alice
+               "4242424242424242424242424242424242424242424242424242424242424242", // Bob
+               "4343434343434343434343434343434343434343434343434343434343434343", // Carol
+               "4444444444444444444444444444444444444444444444444444444444444444", // Dave
+       ]
+               .iter()
+               .map(|secret| SecretKey::from_slice(&<Vec<u8>>::from_hex(secret).unwrap()).unwrap())
+               .collect();
+       let nodes = create_nodes_using_secrets(secret_keys);
 
        // Hardcode the sender->Alice onion message, because it includes an unknown TLV of type 1, which
        // LDK doesn't support constructing.
@@ -490,24 +582,18 @@ fn spec_test_vector() {
        // which is why the asserted strings differ slightly from the spec.
        assert_eq!(sender_to_alice_om.encode(), <Vec<u8>>::from_hex("031195a8046dcbb8e17034bca630065e7a0982e4e36f6f7e5a8d4554e4846fcd9905560002531fe6068134503d2723133227c867ac8fa6c83c537e9a44c3c5bdbdcb1fe33793b828776d70aabbd8cef1a5b52d5a397ae1a20f20435ff6057cd8be339d5aee226660ef73b64afa45dbf2e6e8e26eb96a259b2db5aeecda1ce2e768bbc35d389d7f320ca3d2bd14e2689bef2f5ac0307eaaabc1924eb972c1563d4646ae131accd39da766257ed35ea36e4222527d1db4fa7b2000aab9eafcceed45e28b5560312d4e2299bd8d1e7fe27d10925966c28d497aec400b4630485e82efbabc00550996bdad5d6a9a8c75952f126d14ad2cff91e16198691a7ef2937de83209285f1fb90944b4e46bca7c856a9ce3da10cdf2a7d00dc2bf4f114bc4d3ed67b91cbde558ce9af86dc81fbdc37f8e301b29e23c1466659c62bdbf8cff5d4c20f0fb0851ec72f5e9385dd40fdd2e3ed67ca4517117825665e50a3e26f73c66998daf18e418e8aef9ce2d20da33c3629db2933640e03e7b44c2edf49e9b482db7b475cfd4c617ae1d46d5c24d697846f9f08561eac2b065f9b382501f6eabf07343ed6c602f61eab99cdb52adf63fd44a8db2d3016387ea708fc1c08591e19b4d9984ebe31edbd684c2ea86526dd8c7732b1d8d9117511dc1b643976d356258fce8313b1cb92682f41ab72dedd766f06de375f9edacbcd0ca8c99b865ea2b7952318ea1fd20775a28028b5cf59dece5de14f615b8df254eee63493a5111ea987224bea006d8f1b60d565eef06ac0da194dba2a6d02e79b2f2f34e9ca6e1984a507319d86e9d4fcaeea41b4b9144e0b1826304d4cc1da61cfc5f8b9850697df8adc5e9d6f3acb3219b02764b4909f2b2b22e799fd66c383414a84a7d791b899d4aa663770009eb122f90282c8cb9cda16aba6897edcf9b32951d0080c0f52be3ca011fbec3fb16423deb47744645c3b05fdbd932edf54ba6efd26e65340a8e9b1d1216582e1b30d64524f8ca2d6c5ba63a38f7120a3ed71bed8960bcac2feee2dd41c90be48e3c11ec518eb3d872779e4765a6cc28c6b0fa71ab57ced73ae963cc630edae4258cba2bf25821a6ae049fec2fca28b5dd1bb004d92924b65701b06dcf37f0ccd147a13a03f9bc0f98b7d78fe9058089756931e2cd0e0ed92ec6759d07b248069526c67e9e6ce095118fd3501ba0f858ef030b76c6f6beb11a09317b5ad25343f4b31aef02bc555951bc7791c2c289ecf94d5544dcd6ad3021ed8e8e3db34b2a73e1eedb57b578b068a5401836d6e382110b73690a94328c404af25e85a8d6b808893d1b71af6a31fadd8a8cc6e31ecc0d9ff7e6b91fd03c274a5c1f1ccd25b
61150220a3fddb04c91012f5f7a83a5c90deb2470089d6e38cd5914b9c946eca6e9d31bbf8667d36cf87effc3f3ff283c21dd4137bd569fe7cf758feac94053e4baf7338bb592c8b7c291667fadf4a9bf9a2a154a18f612cbc7f851b3f8f2070e0a9d180622ee4f8e81b0ab250d504cef24116a3ff188cc829fcd8610b56343569e8dc997629410d1967ca9dd1d27eec5e01e4375aad16c46faba268524b154850d0d6fe3a76af2c6aa3e97647c51036049ac565370028d6a439a2672b6face56e1b171496c0722cfa22d9da631be359661617c5d5a2d286c5e19db9452c1e21a0107b6400debda2decb0c838f342dd017cdb2dccdf1fe97e3df3f881856b546997a3fed9e279c720145101567dd56be21688fed66bf9759e432a9aa89cbbd225d13cdea4ca05f7a45cfb6a682a3d5b1e18f7e6cf934fae5098108bae9058d05c3387a01d8d02a656d2bfff67e9f46b2d8a6aac28129e52efddf6e552214c3f8a45bc7a912cca9a7fec1d7d06412c6972cb9e3dc518983f56530b8bffe7f92c4b6eb47d4aef59fb513c4653a42de61bc17ad7728e7fc7590ff05a9e991de03f023d0aaf8688ed6170def5091c66576a424ac1cb").unwrap());
        let sender_dummy_node_id = PublicKey::from_slice(&[2; 33]).unwrap();
-       nodes[0].handle_onion_message(&sender_dummy_node_id, &sender_to_alice_om);
-       let alice_to_bob_om = nodes[0].next_onion_message_for_peer(
-               keys_mgrs[1].1.get_node_id(Recipient::Node).unwrap()).unwrap();
+       nodes[0].messenger.handle_onion_message(&sender_dummy_node_id, &sender_to_alice_om);
+       let alice_to_bob_om = nodes[0].messenger.next_onion_message_for_peer(nodes[1].node_id).unwrap();
        assert_eq!(alice_to_bob_om.encode(), <Vec<u8>>::from_hex("031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f05560002536d53f93796cad550b6c68662dca41f7e8c221c31022c64dd1a627b2df3982b25eac261e88369cfc66e1e3b6d9829cb3dcd707046e68a7796065202a7904811bf2608c5611cf74c9eb5371c7eb1a4428bb39a041493e2a568ddb0b2482a6cc6711bc6116cef144ebf988073cb18d9dd4ce2d3aa9de91a7dc6d7c6f11a852024626e66b41ba1158055505dff9cb15aa51099f315564d9ee3ed6349665dc3e209eedf9b5805ee4f69d315df44c80e63d0e2efbdab60ec96f44a3447c6a6ddb1efb6aa4e072bde1dab974081646bfddf3b02daa2b83847d74dd336465e76e9b8fecc2b0414045eeedfc39939088a76820177dd1103c99939e659beb07197bab9f714b30ba8dc83738e9a6553a57888aaeda156c68933a2f4ff35e3f81135076b944ed9856acbfee9c61299a5d1763eadd14bf5eaf71304c8e165e590d7ecbcd25f1650bf5b6c2ad1823b2dc9145e168974ecf6a2273c94decff76d94bc6708007a17f22262d63033c184d0166c14f41b225a956271947aae6ce65890ed8f0d09c6ffe05ec02ee8b9de69d7077a0c5adeb813aabcc1ba8975b73ab06ddea5f4db3c23a1de831602de2b83f990d4133871a1a81e53f86393e6a7c3a7b73f0c099fa72afe26c3027bb9412338a19303bd6e6591c04fb4cde9b832b5f41ae199301ea8c303b5cef3aca599454273565de40e1148156d1f97c1aa9e58459ab318304075e034f5b7899c12587b86776a18a1da96b7bcdc22864fccc4c41538ebce92a6f054d53bf46770273a70e75fe0155cd6d2f2e937465b0825ce3123b8c206fac4c30478fa0f08a97ade7216dce11626401374993213636e93545a31f500562130f2feb04089661ad8c34d5a4cbd2e4e426f37cb094c786198a220a2646ecadc38c04c29ee67b19d662c209a7b30bfecc7fe8bf7d274de0605ee5df4db490f6d32234f6af639d3fce38a2801bcf8d51e9c090a6c6932355a83848129a378095b34e71cb8f51152dc035a4fe8e802fec8de221a02ba5afd6765ce570bef912f87357936ea0b90cb2990f56035e89539ec66e8dbd6ed50835158614096990e019c3eba3d7dd6a77147641c6145e8b17552cd5cf7cd163dd40b9eaeba8c78e03a2cd8c0b7997d6f56d35f38983a202b4eb8a54e14945c4de1a6dde46167e11708b7a5ff5cb9c0f7fc12fae49a012aa90bb1995c038130b749c48e6f1ffb732e92086def42af10fbc460d94abeb7b2fa744a5e9a491d62a08452be8cf2fdef573deedc1fe97098bce889f98200b26f9bb99da9aceddda6d793d8e0e44a2601ef4590cfbb5c3d
0197aac691e3d31c20fd8e38764962ca34dabeb85df28feabaf6255d4d0df3d814455186a84423182caa87f9673df770432ad8fdfe78d4888632d460d36d2719e8fa8e4b4ca10d817c5d6bc44a8b2affab8c2ba53b8bf4994d63286c2fad6be04c28661162fa1a67065ecda8ba8c13aee4a8039f4f0110e0c0da2366f178d8903e19136dad6df9d8693ce71f3a270f9941de2a93d9b67bc516207ac1687bf6e00b29723c42c7d9c90df9d5e599dbeb7b73add0a6a2b7aba82f98ac93cb6e60494040445229f983a81c34f7f686d166dfc98ec23a6318d4a02a311ac28d655ea4e0f9c3014984f31e621ef003e98c373561d9040893feece2e0fa6cd2dd565e6fbb2773a2407cb2c3273c306cf71f427f2e551c4092e067cf9869f31ac7c6c80dd52d4f85be57a891a41e34be0d564e39b4af6f46b85339254a58b205fb7e10e7d0470ee73622493f28c08962118c23a1198467e72c4ae1cd482144b419247a5895975ea90d135e2a46ef7e5794a1551a447ff0a0d299b66a7f565cd86531f5e7af5408d85d877ce95b1df12b88b7d5954903a5296325ba478ba1e1a9d1f30a2d5052b2e2889bbd64f72c72bc71d8817288a2").unwrap());
-       nodes[1].handle_onion_message(
-               &keys_mgrs[0].1.get_node_id(Recipient::Node).unwrap(), &alice_to_bob_om);
-       let bob_to_carol_om = nodes[1].next_onion_message_for_peer(
-               keys_mgrs[2].1.get_node_id(Recipient::Node).unwrap()).unwrap();
+       nodes[1].messenger.handle_onion_message(&nodes[0].node_id, &alice_to_bob_om);
+       let bob_to_carol_om = nodes[1].messenger.next_onion_message_for_peer(nodes[2].node_id).unwrap();
        assert_eq!(bob_to_carol_om.encode(), <Vec<u8>>::from_hex("02b684babfd400c8dd48b367e9754b8021a3594a34dc94d7101776c7f6a86d0582055600029a77e8523162efa1f4208f4f2050cd5c386ddb6ce6d36235ea569d217ec52209fb85fdf7dbc4786c373eebdba0ddc184cfbe6da624f610e93f62c70f2c56be1090b926359969f040f932c03f53974db5656233bd60af375517d4323002937d784c2c88a564bcefe5c33d3fc21c26d94dfacab85e2e19685fd2ff4c543650958524439b6da68779459aee5ffc9dc543339acec73ff43be4c44ddcbe1c11d50e2411a67056ba9db7939d780f5a86123fdd3abd6f075f7a1d78ab7daf3a82798b7ec1e9f1345bc0d1e935098497067e2ae5a51ece396fcb3bb30871ad73aee51b2418b39f00c8e8e22be4a24f4b624e09cb0414dd46239de31c7be035f71e8da4f5a94d15b44061f46414d3f355069b5c5b874ba56704eb126148a22ec873407fe118972127e63ff80e682e410f297f23841777cec0517e933eaf49d7e34bd203266b42081b3a5193b51ccd34b41342bc67cf73523b741f5c012ba2572e9dda15fbe131a6ac2ff24dc2a7622d58b9f3553092cfae7fae3c8864d95f97aa49ec8edeff5d9f5782471160ee412d82ff6767030fc63eec6a93219a108cd41433834b26676a39846a944998796c79cd1cc460531b8ded659cedfd8aecefd91944f00476f1496daafb4ea6af3feacac1390ea510709783c2aa81a29de27f8959f6284f4684102b17815667cbb0645396ac7d542b878d90c42a1f7f00c4c4eedb2a22a219f38afadb4f1f562b6e000a94e75cc38f535b43a3c0384ccef127fde254a9033a317701c710b2b881065723486e3f4d3eea5e12f374a41565fe43fa137c1a252c2153dde055bb343344c65ad0529010ece29bbd405effbebfe3ba21382b94a60ac1a5ffa03f521792a67b30773cb42e862a8a02a8bbd41b842e115969c87d1ff1f8c7b5726b9f20772dd57fe6e4ea41f959a2a673ffad8e2f2a472c4c8564f3a5a47568dd75294b1c7180c500f7392a7da231b1fe9e525ea2d7251afe9ca52a17fe54a116cb57baca4f55b9b6de915924d644cba9dade4ccc01939d7935749c008bafc6d3ad01cd72341ce5ddf7a5d7d21cf0465ab7a3233433aef21f9acf2bfcdc5a8cc003adc4d82ac9d72b36eb74e05c9aa6ccf439ac92e6b84a3191f0764dd2a2e0b4cc3baa08782b232ad6ecd3ca6029bc08cc094aef3aebddcaddc30070cb6023a689641de86cfc6341c8817215a4650f844cd2ca60f2f10c6e44cfc5f23912684d4457bf4f599879d30b79bf12ef1ab8d34dddc15672b82e56169d4c770f0a2a7a960b1e8790773f5ff7fce92219808f16d061cc85e053971213676d28fb48
925e9232b66533dbd938458eb2cc8358159df7a2a2e4cf87500ede2afb8ce963a845b98978edf26a6948d4932a6b95d022004556d25515fe158092ce9a913b4b4a493281393ca731e8d8e5a3449b9d888fc4e73ffcbb9c6d6d66e88e03cf6e81a0496ede6e4e4172b08c000601993af38f80c7f68c9d5fff9e0e215cff088285bf039ca731744efcb7825a272ca724517736b4890f47e306b200aa2543c363e2c9090bcf3cf56b5b86868a62471c7123a41740392fc1d5ab28da18dca66618e9af7b42b62b23aba907779e73ca03ec60e6ab9e0484b9cae6578e0fddb6386cb3468506bf6420298bf4a690947ab582255551d82487f271101c72e19e54872ab47eae144db66bc2f8194a666a5daec08d12822cb83a61946234f2dfdbd6ca7d8763e6818adee7b401fcdb1ac42f9df1ac5cc5ac131f2869013c8d6cd29d4c4e3d05bccd34ca83366d616296acf854fa05149bfd763a25b9938e96826a037fdcb85545439c76df6beed3bdbd01458f9cf984997cc4f0a7ac3cc3f5e1eeb59c09cadcf5a537f16e444149c8f17d4bdaef16c9fbabc5ef06eb0f0bf3a07a1beddfeacdaf1df5582d6dbd6bb808d6ab31bc22e5d7").unwrap());
-       nodes[2].handle_onion_message(
-               &keys_mgrs[1].1.get_node_id(Recipient::Node).unwrap(), &bob_to_carol_om);
-       let carol_to_dave_om = nodes[2].next_onion_message_for_peer(
-               keys_mgrs[3].1.get_node_id(Recipient::Node).unwrap()).unwrap();
+       nodes[2].messenger.handle_onion_message(&nodes[1].node_id, &bob_to_carol_om);
+       let carol_to_dave_om = nodes[2].messenger.next_onion_message_for_peer(nodes[3].node_id).unwrap();
        assert_eq!(carol_to_dave_om.encode(), <Vec<u8>>::from_hex("025aaca62db7ce6b46386206ef9930daa32e979a35cb185a41cb951aa7d254b03c055600025550b2910294fa73bda99b9de9c851be9cbb481e23194a1743033630efba546b86e7d838d0f6e9cc0ed088dbf6889f0dceca3bfc745bd77d013a31311fa932a8bf1d28387d9ff521eabc651dee8f861fed609a68551145a451f017ec44978addeee97a423c08445531da488fd1ddc998e9cdbfcea59517b53fbf1833f0bbe6188dba6ca773a247220ec934010daca9cc185e1ceb136803469baac799e27a0d82abe53dc48a06a55d1f643885cc7894677dd20a4e4152577d1ba74b870b9279f065f9b340cedb3ca13b7df218e853e10ccd1b59c42a2acf93f489e170ee4373d30ab158b60fc20d3ba73a1f8c750951d69fb5b9321b968ddc8114936412346aff802df65516e1c09c51ef19849ff36c0199fd88c8bec301a30fef0c7cb497901c038611303f64e4174b5daf42832aa5586b84d2c9b95f382f4269a5d1bd4be898618dc78dfd451170f72ca16decac5b03e60702112e439cadd104fb3bbb3d5023c9b80823fdcd0a212a7e1aaa6eeb027adc7f8b3723031d135a09a979a4802788bb7861c6cc85501fb91137768b70aeab309b27b885686604ffc387004ac4f8c44b101c39bc0597ef7fd957f53fc5051f534b10eb3852100962b5e58254e5558689913c26ad6072ea41f5c5db10077cfc91101d4ae393be274c74297da5cc381cd88d54753aaa7df74b2f9da8d88a72bc9218fcd1f19e4ff4aace182312b9509c5175b6988f044c5756d232af02a451a02ca752f3c52747773acff6fd07d2032e6ce562a2c42105d106eba02d0b1904182cdc8c74875b082d4989d3a7e9f0e73de7c75d357f4af976c28c0b206c5e8123fc2391d078592d0d5ff686fd245c0a2de2e535b7cca99c0a37d432a8657393a9e3ca53eec1692159046ba52cb9bc97107349d8673f74cbc97e231f1108005c8d03e24ca813cea2294b39a7a493bcc062708f1f6cf0074e387e7d50e0666ce784ef4d31cb860f6cad767438d9ea5156ff0ae86e029e0247bf94df75ee0cda4f2006061455cb2eaff513d558863ae334cef7a3d45f55e7cc13153c6719e9901c1d4db6c03f643b69ea4860690305651794284d9e61eb848ccdf5a77794d376f0af62e46d4835acce6fd9eef5df73ebb8ea3bb48629766967f446e744ecc57ff3642c4aa1ccee9a2f72d5caa75fa05787d08b79408fce792485fdecdc25df34820fb061275d70b84ece540b0fc47b2453612be34f2b78133a64e812598fbe225fd85415f8ffe5340ce955b5fd9d67dd88c1c531dde298ed25f96df271558c812c26fa386966c76f03a6ebccbca49ac95591692
9bd42e134f982dde03f924c464be5fd1ba44f8dc4c3cbc8162755fd1d8f7dc044b15b1a796c53df7d8769bb167b2045b49cc71e08908796c92c16a235717cabc4bb9f60f8f66ff4fff1f9836388a99583acebdff4a7fb20f48eedcd1f4bdcc06ec8b48e35307df51d9bc81d38a94992dd135b30079e1f592da6e98dff496cb1a7776460a26b06395b176f585636ebdf7eab692b227a31d6979f5a6141292698e91346b6c806b90c7c6971e481559cae92ee8f4136f2226861f5c39ddd29bbdb118a35dece03f49a96804caea79a3dacfbf09d65f2611b5622de51d98e18151acb3bb84c09caaa0cc80edfa743a4679f37d6167618ce99e73362fa6f213409931762618a61f1738c071bba5afc1db24fe94afb70c40d731908ab9a505f76f57a7d40e708fd3df0efc5b7cbb2a7b75cd23449e09684a2f0e2bfa0d6176c35f96fe94d92fc9fa4103972781f81cb6e8df7dbeb0fc529c600d768bed3f08828b773d284f69e9a203459d88c12d6df7a75be2455fec128f07a497a2b2bf626cc6272d0419ca663e9dc66b8224227eb796f0246dcae9c5b0b6cfdbbd40c3245a610481c92047c968c9fc92c04b89cc41a0c15355a8f").unwrap());
        // Dave handles the onion message but he'll log that he errored while decoding the hop data
        // because he sees it as an empty onion message (the only contents of the sender's OM is "hello"
        // with TLV type 1, which Dave ignores because (1) it's odd and he can't understand it and (2) LDK
        // only attempts to parse custom OM TLVs with type > 64).
-       nodes[3].handle_onion_message(
-               &keys_mgrs[2].1.get_node_id(Recipient::Node).unwrap(), &carol_to_dave_om);
+       nodes[3].messenger.handle_onion_message(&nodes[2].node_id, &carol_to_dave_om);
 }
index 0e68d09143c00d251c69ee251162960b05ec3bed..a8ffcc02466f5afa918bf299e7a41572d24b72fc 100644 (file)
@@ -18,13 +18,14 @@ use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
 use crate::blinded_path::BlindedPath;
 use crate::blinded_path::message::{advance_path_by_one, ForwardTlvs, ReceiveTlvs};
 use crate::blinded_path::utils;
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient};
+use crate::events::{Event, EventHandler, EventsProvider};
+use crate::sign::{EntropySource, NodeSigner, Recipient};
 #[cfg(not(c_bindings))]
 use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use crate::ln::features::{InitFeatures, NodeFeatures};
-use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler};
+use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress};
 use crate::ln::onion_utils;
-use crate::ln::peer_handler::IgnoringMessageHandler;
+use crate::routing::gossip::{NetworkGraph, NodeId};
 pub use super::packet::OnionMessageContents;
 use super::packet::ParsedOnionMessageContents;
 use super::offers::OffersMessageHandler;
@@ -35,9 +36,18 @@ use crate::util::ser::Writeable;
 use core::fmt;
 use core::ops::Deref;
 use crate::io;
-use crate::sync::{Arc, Mutex};
+use crate::sync::Mutex;
 use crate::prelude::*;
 
+#[cfg(not(c_bindings))]
+use {
+       crate::sign::KeysManager,
+       crate::ln::peer_handler::IgnoringMessageHandler,
+       crate::sync::Arc,
+};
+
+pub(super) const MAX_TIMER_TICKS: usize = 2;
+
 /// A sender, receiver and forwarder of [`OnionMessage`]s.
 ///
 /// # Handling Messages
@@ -60,9 +70,9 @@ use crate::prelude::*;
 /// # extern crate bitcoin;
 /// # use bitcoin::hashes::_export::_core::time::Duration;
 /// # use bitcoin::hashes::hex::FromHex;
-/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey, self};
 /// # use lightning::blinded_path::BlindedPath;
-/// # use lightning::sign::KeysManager;
+/// # use lightning::sign::{EntropySource, KeysManager};
 /// # use lightning::ln::peer_handler::IgnoringMessageHandler;
 /// # use lightning::onion_message::{OnionMessageContents, Destination, MessageRouter, OnionMessagePath, OnionMessenger};
 /// # use lightning::util::logger::{Logger, Record};
@@ -71,12 +81,25 @@ use crate::prelude::*;
 /// # use std::sync::Arc;
 /// # struct FakeLogger;
 /// # impl Logger for FakeLogger {
-/// #     fn log(&self, record: &Record) { unimplemented!() }
+/// #     fn log(&self, record: Record) { println!("{:?}" , record); }
 /// # }
 /// # struct FakeMessageRouter {}
 /// # impl MessageRouter for FakeMessageRouter {
 /// #     fn find_path(&self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination) -> Result<OnionMessagePath, ()> {
-/// #         unimplemented!()
+/// #         let secp_ctx = Secp256k1::new();
+/// #         let node_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+/// #         let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
+/// #         let hop_node_id2 = hop_node_id1;
+/// #         Ok(OnionMessagePath {
+/// #             intermediate_nodes: vec![hop_node_id1, hop_node_id2],
+/// #             destination,
+/// #             first_node_addresses: None,
+/// #         })
+/// #     }
+/// #     fn create_blinded_paths<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
+/// #         &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+/// #     ) -> Result<Vec<BlindedPath>, ()> {
+/// #         unreachable!()
 /// #     }
 /// # }
 /// # let seed = [42u8; 32];
@@ -86,7 +109,7 @@ use crate::prelude::*;
 /// # let node_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
 /// # let secp_ctx = Secp256k1::new();
 /// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
-/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1, hop_node_id1);
+/// # let (hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1);
 /// # let destination_node_id = hop_node_id1;
 /// # let message_router = Arc::new(FakeMessageRouter {});
 /// # let custom_message_handler = IgnoringMessageHandler {};
@@ -97,7 +120,8 @@ use crate::prelude::*;
 ///     &keys_manager, &keys_manager, logger, message_router, &offers_message_handler,
 ///     &custom_message_handler
 /// );
-///
+
+/// # #[derive(Debug)]
 /// # struct YourCustomMessage {}
 /// impl Writeable for YourCustomMessage {
 ///    fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
@@ -112,13 +136,10 @@ use crate::prelude::*;
 ///    }
 /// }
 /// // Send a custom onion message to a node id.
-/// let path = OnionMessagePath {
-///    intermediate_nodes: vec![hop_node_id1, hop_node_id2],
-///    destination: Destination::Node(destination_node_id),
-/// };
+/// let destination = Destination::Node(destination_node_id);
 /// let reply_path = None;
 /// # let message = YourCustomMessage {};
-/// onion_messenger.send_onion_message(path, message, reply_path);
+/// onion_messenger.send_onion_message(message, destination, reply_path);
 ///
 /// // Create a blinded path to yourself, for someone to send an onion message to.
 /// # let your_node_id = hop_node_id1;
@@ -126,13 +147,10 @@ use crate::prelude::*;
 /// let blinded_path = BlindedPath::new_for_message(&hops, &keys_manager, &secp_ctx).unwrap();
 ///
 /// // Send a custom onion message to a blinded path.
-/// let path = OnionMessagePath {
-///    intermediate_nodes: vec![hop_node_id1, hop_node_id2],
-///    destination: Destination::BlindedPath(blinded_path),
-/// };
+/// let destination = Destination::BlindedPath(blinded_path);
 /// let reply_path = None;
 /// # let message = YourCustomMessage {};
-/// onion_messenger.send_onion_message(path, message, reply_path);
+/// onion_messenger.send_onion_message(message, destination, reply_path);
 /// ```
 ///
 /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
@@ -144,18 +162,87 @@ where
        L::Target: Logger,
        MR::Target: MessageRouter,
        OMH::Target: OffersMessageHandler,
-       CMH:: Target: CustomOnionMessageHandler,
+       CMH::Target: CustomOnionMessageHandler,
 {
        entropy_source: ES,
        node_signer: NS,
        logger: L,
-       pending_messages: Mutex<HashMap<PublicKey, VecDeque<OnionMessage>>>,
+       message_recipients: Mutex<HashMap<PublicKey, OnionMessageRecipient>>,
        secp_ctx: Secp256k1<secp256k1::All>,
        message_router: MR,
        offers_handler: OMH,
        custom_handler: CMH,
 }
 
+/// [`OnionMessage`]s buffered to be sent.
+enum OnionMessageRecipient {
+       /// Messages for a node connected as a peer.
+       ConnectedPeer(VecDeque<OnionMessage>),
+
+       /// Messages for a node that is not yet connected, which are dropped after [`MAX_TIMER_TICKS`]
+       /// and tracked here.
+       PendingConnection(VecDeque<OnionMessage>, Option<Vec<SocketAddress>>, usize),
+}
+
+impl OnionMessageRecipient {
+       fn pending_connection(addresses: Vec<SocketAddress>) -> Self {
+               Self::PendingConnection(VecDeque::new(), Some(addresses), 0)
+       }
+
+       fn pending_messages(&self) -> &VecDeque<OnionMessage> {
+               match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => pending_messages,
+               }
+       }
+
+       fn enqueue_message(&mut self, message: OnionMessage) {
+               let pending_messages = match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => pending_messages,
+               };
+
+               pending_messages.push_back(message);
+       }
+
+       fn dequeue_message(&mut self) -> Option<OnionMessage> {
+               let pending_messages = match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => {
+                               debug_assert!(false);
+                               pending_messages
+                       },
+               };
+
+               pending_messages.pop_front()
+       }
+
+       #[cfg(test)]
+       fn release_pending_messages(&mut self) -> VecDeque<OnionMessage> {
+               let pending_messages = match self {
+                       OnionMessageRecipient::ConnectedPeer(pending_messages) => pending_messages,
+                       OnionMessageRecipient::PendingConnection(pending_messages, _, _) => pending_messages,
+               };
+
+               core::mem::take(pending_messages)
+       }
+
+       fn mark_connected(&mut self) {
+               if let OnionMessageRecipient::PendingConnection(pending_messages, _, _) = self {
+                       let mut new_pending_messages = VecDeque::new();
+                       core::mem::swap(pending_messages, &mut new_pending_messages);
+                       *self = OnionMessageRecipient::ConnectedPeer(new_pending_messages);
+               }
+       }
+
+       fn is_connected(&self) -> bool {
+               match self {
+                       OnionMessageRecipient::ConnectedPeer(..) => true,
+                       OnionMessageRecipient::PendingConnection(..) => false,
+               }
+       }
+}
+
 /// An [`OnionMessage`] for [`OnionMessenger`] to send.
 ///
 /// These are obtained when released from [`OnionMessenger`]'s handlers after which they are
@@ -177,7 +264,7 @@ pub struct PendingOnionMessage<T: OnionMessageContents> {
 ///
 /// These are obtained when released from [`OnionMessenger`]'s handlers after which they are
 /// enqueued for sending.
-pub type PendingOnionMessage<T: OnionMessageContents> = (T, Destination, Option<BlindedPath>);
+pub type PendingOnionMessage<T> = (T, Destination, Option<BlindedPath>);
 
 pub(crate) fn new_pending_onion_message<T: OnionMessageContents>(
        contents: T, destination: Destination, reply_path: Option<BlindedPath>
@@ -194,19 +281,105 @@ pub trait MessageRouter {
        fn find_path(
                &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
        ) -> Result<OnionMessagePath, ()>;
+
+       /// Creates [`BlindedPath`]s to the `recipient` node. The nodes in `peers` are assumed to be
+       /// direct peers with the `recipient`.
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+               secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()>;
 }
 
 /// A [`MessageRouter`] that can only route to a directly connected [`Destination`].
-pub struct DefaultMessageRouter;
+pub struct DefaultMessageRouter<G: Deref<Target=NetworkGraph<L>>, L: Deref>
+where
+       L::Target: Logger,
+{
+       network_graph: G,
+}
 
-impl MessageRouter for DefaultMessageRouter {
+impl<G: Deref<Target=NetworkGraph<L>>, L: Deref> DefaultMessageRouter<G, L>
+where
+       L::Target: Logger,
+{
+       /// Creates a [`DefaultMessageRouter`] using the given [`NetworkGraph`].
+       pub fn new(network_graph: G) -> Self {
+               Self { network_graph }
+       }
+}
+
+impl<G: Deref<Target=NetworkGraph<L>>, L: Deref> MessageRouter for DefaultMessageRouter<G, L>
+where
+       L::Target: Logger,
+{
        fn find_path(
                &self, _sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
        ) -> Result<OnionMessagePath, ()> {
-               if peers.contains(&destination.first_node()) {
-                       Ok(OnionMessagePath { intermediate_nodes: vec![], destination })
+               let first_node = destination.first_node();
+               if peers.contains(&first_node) {
+                       Ok(OnionMessagePath {
+                               intermediate_nodes: vec![], destination, first_node_addresses: None
+                       })
                } else {
-                       Err(())
+                       let network_graph = self.network_graph.deref().read_only();
+                       let node_announcement = network_graph
+                               .node(&NodeId::from_pubkey(&first_node))
+                               .and_then(|node_info| node_info.announcement_info.as_ref())
+                               .and_then(|announcement_info| announcement_info.announcement_message.as_ref())
+                               .map(|node_announcement| &node_announcement.contents);
+
+                       match node_announcement {
+                               Some(node_announcement) if node_announcement.features.supports_onion_messages() => {
+                                       let first_node_addresses = Some(node_announcement.addresses.clone());
+                                       Ok(OnionMessagePath {
+                                               intermediate_nodes: vec![], destination, first_node_addresses
+                                       })
+                               },
+                               _ => Err(()),
+                       }
+               }
+       }
+
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+               secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()> {
+               // Limit the number of blinded paths that are computed.
+               const MAX_PATHS: usize = 3;
+
+               // Ensure peers have at least three channels so that it is more difficult to infer the
+               // recipient's node_id.
+               const MIN_PEER_CHANNELS: usize = 3;
+
+               let network_graph = self.network_graph.deref().read_only();
+               let paths = peers.iter()
+                       // Limit to peers with announced channels
+                       .filter(|pubkey|
+                               network_graph
+                                       .node(&NodeId::from_pubkey(pubkey))
+                                       .map(|info| &info.channels[..])
+                                       .map(|channels| channels.len() >= MIN_PEER_CHANNELS)
+                                       .unwrap_or(false)
+                       )
+                       .map(|pubkey| vec![*pubkey, recipient])
+                       .map(|node_pks| BlindedPath::new_for_message(&node_pks, entropy_source, secp_ctx))
+                       .take(MAX_PATHS)
+                       .collect::<Result<Vec<_>, _>>();
+
+               match paths {
+                       Ok(paths) if !paths.is_empty() => Ok(paths),
+                       _ => {
+                               if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) {
+                                       BlindedPath::one_hop_for_message(recipient, entropy_source, secp_ctx)
+                                               .map(|path| vec![path])
+                               } else {
+                                       Err(())
+                               }
+                       },
                }
        }
 }
@@ -219,6 +392,22 @@ pub struct OnionMessagePath {
 
        /// The recipient of the message.
        pub destination: Destination,
+
+       /// Addresses that may be used to connect to [`OnionMessagePath::first_node`].
+       ///
+       /// Only needs to be set if a connection to the node is required. [`OnionMessenger`] may use
+       /// this to initiate such a connection.
+       pub first_node_addresses: Option<Vec<SocketAddress>>,
+}
+
+impl OnionMessagePath {
+       /// Returns the first node in the path.
+       pub fn first_node(&self) -> PublicKey {
+               self.intermediate_nodes
+                       .first()
+                       .copied()
+                       .unwrap_or_else(|| self.destination.first_node())
+       }
 }
 
 /// The destination of an onion message.
@@ -246,6 +435,19 @@ impl Destination {
        }
 }
 
+/// Result of successfully [sending an onion message].
+///
+/// [sending an onion message]: OnionMessenger::send_onion_message
+#[derive(Debug, PartialEq, Eq)]
+pub enum SendSuccess {
+       /// The message was buffered and will be sent once it is processed by
+       /// [`OnionMessageHandler::next_onion_message_for_peer`].
+       Buffered,
+       /// The message was buffered and will be sent once the node is connected as a peer and it is
+       /// processed by [`OnionMessageHandler::next_onion_message_for_peer`].
+       BufferedAwaitingConnection(PublicKey),
+}
+
 /// Errors that may occur when [sending an onion message].
 ///
 /// [sending an onion message]: OnionMessenger::send_onion_message
@@ -259,8 +461,10 @@ pub enum SendError {
        /// The provided [`Destination`] was an invalid [`BlindedPath`] due to not having any blinded
        /// hops.
        TooFewBlindedHops,
-       /// Our next-hop peer was offline or does not support onion message forwarding.
-       InvalidFirstHop,
+       /// The first hop is not a peer and doesn't have a known [`SocketAddress`].
+       InvalidFirstHop(PublicKey),
+       /// A path from the sender to the destination could not be found by the [`MessageRouter`].
+       PathNotFound,
        /// Onion message contents must have a TLV type >= 64.
        InvalidMessage,
        /// Our next-hop peer's buffer was full or our total outbound buffer was full.
@@ -327,16 +531,17 @@ pub enum PeeledOnion<T: OnionMessageContents> {
 /// Creates an [`OnionMessage`] with the given `contents` for sending to the destination of
 /// `path`.
 ///
-/// Returns both the node id of the peer to send the message to and the message itself.
+/// Returns the node id of the peer to send the message to, the message itself, and any addresses
+/// need to connect to the first node.
 pub fn create_onion_message<ES: Deref, NS: Deref, T: OnionMessageContents>(
        entropy_source: &ES, node_signer: &NS, secp_ctx: &Secp256k1<secp256k1::All>,
        path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>,
-) -> Result<(PublicKey, OnionMessage), SendError>
+) -> Result<(PublicKey, OnionMessage, Option<Vec<SocketAddress>>), SendError>
 where
        ES::Target: EntropySource,
        NS::Target: NodeSigner,
 {
-       let OnionMessagePath { intermediate_nodes, mut destination } = path;
+       let OnionMessagePath { intermediate_nodes, mut destination, first_node_addresses } = path;
        if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
                if blinded_hops.is_empty() {
                        return Err(SendError::TooFewBlindedHops);
@@ -377,10 +582,8 @@ where
        let onion_routing_packet = construct_onion_message_packet(
                packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
 
-       Ok((first_node_id, OnionMessage {
-               blinding_point,
-               onion_routing_packet
-       }))
+       let message = OnionMessage { blinding_point, onion_routing_packet };
+       Ok((first_node_id, message, first_node_addresses))
 }
 
 /// Decode one layer of an incoming [`OnionMessage`].
@@ -501,7 +704,7 @@ where
                OnionMessenger {
                        entropy_source,
                        node_signer,
-                       pending_messages: Mutex::new(HashMap::new()),
+                       message_recipients: Mutex::new(HashMap::new()),
                        secp_ctx,
                        logger,
                        message_router,
@@ -510,35 +713,113 @@ where
                }
        }
 
-       /// Sends an [`OnionMessage`] with the given `contents` for sending to the destination of
-       /// `path`.
+       /// Sends an [`OnionMessage`] with the given `contents` to `destination`.
        ///
        /// See [`OnionMessenger`] for example usage.
        pub fn send_onion_message<T: OnionMessageContents>(
-               &self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>
-       ) -> Result<(), SendError> {
-               let (first_node_id, onion_msg) = create_onion_message(
+               &self, contents: T, destination: Destination, reply_path: Option<BlindedPath>
+       ) -> Result<SendSuccess, SendError> {
+               self.find_path_and_enqueue_onion_message(
+                       contents, destination, reply_path, format_args!("")
+               )
+       }
+
+       fn find_path_and_enqueue_onion_message<T: OnionMessageContents>(
+               &self, contents: T, destination: Destination, reply_path: Option<BlindedPath>,
+               log_suffix: fmt::Arguments
+       ) -> Result<SendSuccess, SendError> {
+               let result = self.find_path(destination)
+                       .and_then(|path| self.enqueue_onion_message(path, contents, reply_path, log_suffix));
+
+               match result.as_ref() {
+                       Err(SendError::GetNodeIdFailed) => {
+                               log_warn!(self.logger, "Unable to retrieve node id {}", log_suffix);
+                       },
+                       Err(SendError::PathNotFound) => {
+                               log_trace!(self.logger, "Failed to find path {}", log_suffix);
+                       },
+                       Err(e) => {
+                               log_trace!(self.logger, "Failed sending onion message {}: {:?}", log_suffix, e);
+                       },
+                       Ok(SendSuccess::Buffered) => {
+                               log_trace!(self.logger, "Buffered onion message {}", log_suffix);
+                       },
+                       Ok(SendSuccess::BufferedAwaitingConnection(node_id)) => {
+                               log_trace!(
+                                       self.logger, "Buffered onion message waiting on peer connection {}: {:?}",
+                                       log_suffix, node_id
+                               );
+                       },
+               }
+
+               result
+       }
+
+       fn find_path(&self, destination: Destination) -> Result<OnionMessagePath, SendError> {
+               let sender = self.node_signer
+                       .get_node_id(Recipient::Node)
+                       .map_err(|_| SendError::GetNodeIdFailed)?;
+
+               let peers = self.message_recipients.lock().unwrap()
+                       .iter()
+                       .filter(|(_, recipient)| matches!(recipient, OnionMessageRecipient::ConnectedPeer(_)))
+                       .map(|(node_id, _)| *node_id)
+                       .collect();
+
+               self.message_router
+                       .find_path(sender, peers, destination)
+                       .map_err(|_| SendError::PathNotFound)
+       }
+
+       fn enqueue_onion_message<T: OnionMessageContents>(
+               &self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>,
+               log_suffix: fmt::Arguments
+       ) -> Result<SendSuccess, SendError> {
+               log_trace!(self.logger, "Constructing onion message {}: {:?}", log_suffix, contents);
+
+               let (first_node_id, onion_message, addresses) = create_onion_message(
                        &self.entropy_source, &self.node_signer, &self.secp_ctx, path, contents, reply_path
                )?;
 
-               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
-               if outbound_buffer_full(&first_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
-               match pending_per_peer_msgs.entry(first_node_id) {
-                       hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
+               let mut message_recipients = self.message_recipients.lock().unwrap();
+               if outbound_buffer_full(&first_node_id, &message_recipients) {
+                       return Err(SendError::BufferFull);
+               }
+
+               match message_recipients.entry(first_node_id) {
+                       hash_map::Entry::Vacant(e) => match addresses {
+                               None => Err(SendError::InvalidFirstHop(first_node_id)),
+                               Some(addresses) => {
+                                       e.insert(OnionMessageRecipient::pending_connection(addresses))
+                                               .enqueue_message(onion_message);
+                                       Ok(SendSuccess::BufferedAwaitingConnection(first_node_id))
+                               },
+                       },
                        hash_map::Entry::Occupied(mut e) => {
-                               e.get_mut().push_back(onion_msg);
-                               Ok(())
-                       }
+                               e.get_mut().enqueue_message(onion_message);
+                               if e.get().is_connected() {
+                                       Ok(SendSuccess::Buffered)
+                               } else {
+                                       Ok(SendSuccess::BufferedAwaitingConnection(first_node_id))
+                               }
+                       },
                }
        }
 
+       #[cfg(test)]
+       pub(super) fn send_onion_message_using_path<T: OnionMessageContents>(
+               &self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>
+       ) -> Result<SendSuccess, SendError> {
+               self.enqueue_onion_message(path, contents, reply_path, format_args!(""))
+       }
+
        fn handle_onion_message_response<T: OnionMessageContents>(
                &self, response: Option<T>, reply_path: Option<BlindedPath>, log_suffix: fmt::Arguments
        ) {
                if let Some(response) = response {
                        match reply_path {
                                Some(reply_path) => {
-                                       self.find_path_and_enqueue_onion_message(
+                                       let _ = self.find_path_and_enqueue_onion_message(
                                                response, Destination::BlindedPath(reply_path), None, log_suffix
                                        );
                                },
@@ -549,55 +830,26 @@ where
                }
        }
 
-       fn find_path_and_enqueue_onion_message<T: OnionMessageContents>(
-               &self, contents: T, destination: Destination, reply_path: Option<BlindedPath>,
-               log_suffix: fmt::Arguments
-       ) {
-               let sender = match self.node_signer.get_node_id(Recipient::Node) {
-                       Ok(node_id) => node_id,
-                       Err(_) => {
-                               log_warn!(self.logger, "Unable to retrieve node id {}", log_suffix);
-                               return;
-                       }
-               };
-
-               let peers = self.pending_messages.lock().unwrap().keys().copied().collect();
-               let path = match self.message_router.find_path(sender, peers, destination) {
-                       Ok(path) => path,
-                       Err(()) => {
-                               log_trace!(self.logger, "Failed to find path {}", log_suffix);
-                               return;
-                       },
-               };
-
-               log_trace!(self.logger, "Sending onion message {}", log_suffix);
-
-               if let Err(e) = self.send_onion_message(path, contents, reply_path) {
-                       log_trace!(self.logger, "Failed sending onion message {}: {:?}", log_suffix, e);
-                       return;
-               }
-       }
-
        #[cfg(test)]
        pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<OnionMessage>> {
-               let mut pending_msgs = self.pending_messages.lock().unwrap();
+               let mut message_recipients = self.message_recipients.lock().unwrap();
                let mut msgs = HashMap::new();
                // We don't want to disconnect the peers by removing them entirely from the original map, so we
-               // swap the pending message buffers individually.
-               for (peer_node_id, pending_messages) in &mut *pending_msgs {
-                       msgs.insert(*peer_node_id, core::mem::take(pending_messages));
+               // release the pending message buffers individually.
+               for (node_id, recipient) in &mut *message_recipients {
+                       msgs.insert(*node_id, recipient.release_pending_messages());
                }
                msgs
        }
 }
 
-fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<OnionMessage>>) -> bool {
+fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, OnionMessageRecipient>) -> bool {
        const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
        const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
        let mut total_buffered_bytes = 0;
        let mut peer_buffered_bytes = 0;
        for (pk, peer_buf) in buffer {
-               for om in peer_buf {
+               for om in peer_buf.pending_messages() {
                        let om_len = om.serialized_length();
                        if pk == peer_node_id {
                                peer_buffered_bytes += om_len;
@@ -614,6 +866,27 @@ fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, Ve
        false
 }
 
+impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref> EventsProvider
+for OnionMessenger<ES, NS, L, MR, OMH, CMH>
+where
+       ES::Target: EntropySource,
+       NS::Target: NodeSigner,
+       L::Target: Logger,
+       MR::Target: MessageRouter,
+       OMH::Target: OffersMessageHandler,
+       CMH::Target: CustomOnionMessageHandler,
+{
+       fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+               for (node_id, recipient) in self.message_recipients.lock().unwrap().iter_mut() {
+                       if let OnionMessageRecipient::PendingConnection(_, addresses, _) = recipient {
+                               if let Some(addresses) = addresses.take() {
+                                       handler.handle_event(Event::ConnectionNeeded { node_id: *node_id, addresses });
+                               }
+                       }
+               }
+       }
+}
+
 impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref> OnionMessageHandler
 for OnionMessenger<ES, NS, L, MR, OMH, CMH>
 where
@@ -629,9 +902,10 @@ where
                        msg, &self.secp_ctx, &*self.node_signer, &*self.logger, &*self.custom_handler
                ) {
                        Ok(PeeledOnion::Receive(message, path_id, reply_path)) => {
-                               log_trace!(self.logger,
-                                       "Received an onion message with path_id {:02x?} and {} reply_path",
-                                               path_id, if reply_path.is_some() { "a" } else { "no" });
+                               log_trace!(
+                                       self.logger,
+                                       "Received an onion message with path_id {:02x?} and {} reply_path: {:?}",
+                                       path_id, if reply_path.is_some() { "a" } else { "no" }, message);
 
                                match message {
                                        ParsedOnionMessageContents::Offers(msg) => {
@@ -655,24 +929,28 @@ where
                                }
                        },
                        Ok(PeeledOnion::Forward(next_node_id, onion_message)) => {
-                               let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
-                               if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
+                               let mut message_recipients = self.message_recipients.lock().unwrap();
+                               if outbound_buffer_full(&next_node_id, &message_recipients) {
                                        log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
                                        return
                                }
 
                                #[cfg(fuzzing)]
-                               pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
-
-                               match pending_per_peer_msgs.entry(next_node_id) {
-                                       hash_map::Entry::Vacant(_) => {
+                               message_recipients
+                                       .entry(next_node_id)
+                                       .or_insert_with(|| OnionMessageRecipient::ConnectedPeer(VecDeque::new()));
+
+                               match message_recipients.entry(next_node_id) {
+                                       hash_map::Entry::Occupied(mut e) if matches!(
+                                               e.get(), OnionMessageRecipient::ConnectedPeer(..)
+                                       ) => {
+                                               e.get_mut().enqueue_message(onion_message);
+                                               log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
+                                       },
+                                       _ => {
                                                log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
                                                return
                                        },
-                                       hash_map::Entry::Occupied(mut e) => {
-                                               e.get_mut().push_back(onion_message);
-                                               log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
-                                       }
                                }
                        },
                        Err(e) => {
@@ -683,15 +961,43 @@ where
 
        fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init, _inbound: bool) -> Result<(), ()> {
                if init.features.supports_onion_messages() {
-                       let mut peers = self.pending_messages.lock().unwrap();
-                       peers.insert(their_node_id.clone(), VecDeque::new());
+                       self.message_recipients.lock().unwrap()
+                               .entry(*their_node_id)
+                               .or_insert_with(|| OnionMessageRecipient::ConnectedPeer(VecDeque::new()))
+                               .mark_connected();
+               } else {
+                       self.message_recipients.lock().unwrap().remove(their_node_id);
                }
+
                Ok(())
        }
 
        fn peer_disconnected(&self, their_node_id: &PublicKey) {
-               let mut pending_msgs = self.pending_messages.lock().unwrap();
-               pending_msgs.remove(their_node_id);
+               match self.message_recipients.lock().unwrap().remove(their_node_id) {
+                       Some(OnionMessageRecipient::ConnectedPeer(..)) => {},
+                       Some(_) => debug_assert!(false),
+                       None => {},
+               }
+       }
+
+       fn timer_tick_occurred(&self) {
+               let mut message_recipients = self.message_recipients.lock().unwrap();
+
+               // Drop any pending recipients since the last call to avoid retaining buffered messages for
+               // too long.
+               message_recipients.retain(|_, recipient| match recipient {
+                       OnionMessageRecipient::PendingConnection(_, None, ticks) => *ticks < MAX_TIMER_TICKS,
+                       OnionMessageRecipient::PendingConnection(_, Some(_), _) => true,
+                       _ => true,
+               });
+
+               // Increment a timer tick for pending recipients so that their buffered messages are dropped
+               // at MAX_TIMER_TICKS.
+               for recipient in message_recipients.values_mut() {
+                       if let OnionMessageRecipient::PendingConnection(_, None, ticks) = recipient {
+                               *ticks += 1;
+                       }
+               }
        }
 
        fn provided_node_features(&self) -> NodeFeatures {
@@ -716,7 +1022,7 @@ where
                        let PendingOnionMessage { contents, destination, reply_path } = message;
                        #[cfg(c_bindings)]
                        let (contents, destination, reply_path) = message;
-                       self.find_path_and_enqueue_onion_message(
+                       let _ = self.find_path_and_enqueue_onion_message(
                                contents, destination, reply_path, format_args!("when sending OffersMessage")
                        );
                }
@@ -727,16 +1033,14 @@ where
                        let PendingOnionMessage { contents, destination, reply_path } = message;
                        #[cfg(c_bindings)]
                        let (contents, destination, reply_path) = message;
-                       self.find_path_and_enqueue_onion_message(
+                       let _ = self.find_path_and_enqueue_onion_message(
                                contents, destination, reply_path, format_args!("when sending CustomMessage")
                        );
                }
 
-               let mut pending_msgs = self.pending_messages.lock().unwrap();
-               if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
-                       return msgs.pop_front()
-               }
-               None
+               self.message_recipients.lock().unwrap()
+                       .get_mut(&peer_node_id)
+                       .and_then(|buffer| buffer.dequeue_message())
        }
 }
 
@@ -754,7 +1058,7 @@ pub type SimpleArcOnionMessenger<M, T, F, L> = OnionMessenger<
        Arc<KeysManager>,
        Arc<KeysManager>,
        Arc<L>,
-       Arc<DefaultMessageRouter>,
+       Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>,
        Arc<SimpleArcChannelManager<M, T, F, L>>,
        IgnoringMessageHandler
 >;
@@ -773,7 +1077,7 @@ pub type SimpleRefOnionMessenger<
        &'a KeysManager,
        &'a KeysManager,
        &'b L,
-       &'i DefaultMessageRouter,
+       &'i DefaultMessageRouter<&'g NetworkGraph<&'b L>, &'b L>,
        &'j SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
        IgnoringMessageHandler
 >;
index 533b4cb571805c98be6d4235fef190de00d7ff7c..a65c0393a1e922042482cd3d16dca877cf110ea2 100644 (file)
@@ -10,6 +10,7 @@
 //! Message handling for BOLT 12 Offers.
 
 use core::convert::TryFrom;
+use core::fmt;
 use crate::io::{self, Read};
 use crate::ln::msgs::DecodeError;
 use crate::offers::invoice_error::InvoiceError;
@@ -17,9 +18,10 @@ use crate::offers::invoice_request::InvoiceRequest;
 use crate::offers::invoice::Bolt12Invoice;
 use crate::offers::parse::Bolt12ParseError;
 use crate::onion_message::OnionMessageContents;
-use crate::onion_message::messenger::PendingOnionMessage;
 use crate::util::logger::Logger;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
+#[cfg(not(c_bindings))]
+use crate::onion_message::messenger::PendingOnionMessage;
 
 use crate::prelude::*;
 
@@ -58,7 +60,7 @@ pub trait OffersMessageHandler {
 /// Possible BOLT 12 Offers messages sent and received via an [`OnionMessage`].
 ///
 /// [`OnionMessage`]: crate::ln::msgs::OnionMessage
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub enum OffersMessage {
        /// A request for a [`Bolt12Invoice`] for a particular [`Offer`].
        ///
@@ -92,6 +94,22 @@ impl OffersMessage {
        }
 }
 
+impl fmt::Debug for OffersMessage {
+       fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+               match self {
+                       OffersMessage::InvoiceRequest(message) => {
+                               write!(f, "{:?}", message.as_tlv_stream())
+                       }
+                       OffersMessage::Invoice(message) => {
+                               write!(f, "{:?}", message.as_tlv_stream())
+                       }
+                       OffersMessage::InvoiceError(message) => {
+                               write!(f, "{:?}", message)
+                       }
+               }
+       }
+}
+
 impl OnionMessageContents for OffersMessage {
        fn tlv_type(&self) -> u64 {
                match self {
index 37442ab31efadc066ce639c95aee45124d2a309d..5ce02c54d08800b777a0c2c3a25f27ba1ffae462 100644 (file)
@@ -147,7 +147,7 @@ impl<T: OnionMessageContents> Writeable for ParsedOnionMessageContents<T> {
 }
 
 /// The contents of an onion message.
-pub trait OnionMessageContents: Writeable {
+pub trait OnionMessageContents: Writeable + core::fmt::Debug {
        /// Returns the TLV type identifying the message contents. MUST be >= 64.
        fn tlv_type(&self) -> u64;
 }
index f556fabad0cbcdc9f3d94924bd236080c692c865..9b4e41ae174d7b8a73cb8f64b7918f62a0dbf01b 100644 (file)
@@ -870,31 +870,31 @@ impl ChannelInfo {
        /// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a
        /// returned `source`, or `None` if `target` is not one of the channel's counterparties.
        pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> {
-               let (direction, source) = {
+               let (direction, source, outbound) = {
                        if target == &self.node_one {
-                               (self.two_to_one.as_ref(), &self.node_two)
+                               (self.two_to_one.as_ref(), &self.node_two, false)
                        } else if target == &self.node_two {
-                               (self.one_to_two.as_ref(), &self.node_one)
+                               (self.one_to_two.as_ref(), &self.node_one, true)
                        } else {
                                return None;
                        }
                };
-               direction.map(|dir| (DirectedChannelInfo::new(self, dir), source))
+               direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), source))
        }
 
        /// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a
        /// returned `target`, or `None` if `source` is not one of the channel's counterparties.
        pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> {
-               let (direction, target) = {
+               let (direction, target, outbound) = {
                        if source == &self.node_one {
-                               (self.one_to_two.as_ref(), &self.node_two)
+                               (self.one_to_two.as_ref(), &self.node_two, true)
                        } else if source == &self.node_two {
-                               (self.two_to_one.as_ref(), &self.node_one)
+                               (self.two_to_one.as_ref(), &self.node_one, false)
                        } else {
                                return None;
                        }
                };
-               direction.map(|dir| (DirectedChannelInfo::new(self, dir), target))
+               direction.map(|dir| (DirectedChannelInfo::new(self, dir, outbound), target))
        }
 
        /// Returns a [`ChannelUpdateInfo`] based on the direction implied by the channel_flag.
@@ -990,51 +990,55 @@ impl Readable for ChannelInfo {
 pub struct DirectedChannelInfo<'a> {
        channel: &'a ChannelInfo,
        direction: &'a ChannelUpdateInfo,
-       htlc_maximum_msat: u64,
-       effective_capacity: EffectiveCapacity,
+       /// The direction this channel is in - if set, it indicates that we're traversing the channel
+       /// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`].
+       from_node_one: bool,
 }
 
 impl<'a> DirectedChannelInfo<'a> {
        #[inline]
-       fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo) -> Self {
-               let mut htlc_maximum_msat = direction.htlc_maximum_msat;
-               let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
-
-               let effective_capacity = match capacity_msat {
-                       Some(capacity_msat) => {
-                               htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
-                               EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: htlc_maximum_msat }
-                       },
-                       None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
-               };
-
-               Self {
-                       channel, direction, htlc_maximum_msat, effective_capacity
-               }
+       fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self {
+               Self { channel, direction, from_node_one }
        }
 
        /// Returns information for the channel.
        #[inline]
        pub fn channel(&self) -> &'a ChannelInfo { self.channel }
 
-       /// Returns the maximum HTLC amount allowed over the channel in the direction.
-       #[inline]
-       pub fn htlc_maximum_msat(&self) -> u64 {
-               self.htlc_maximum_msat
-       }
-
        /// Returns the [`EffectiveCapacity`] of the channel in the direction.
        ///
        /// This is either the total capacity from the funding transaction, if known, or the
        /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known,
        /// otherwise.
+       #[inline]
        pub fn effective_capacity(&self) -> EffectiveCapacity {
-               self.effective_capacity
+               let mut htlc_maximum_msat = self.direction().htlc_maximum_msat;
+               let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
+
+               match capacity_msat {
+                       Some(capacity_msat) => {
+                               htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
+                               EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
+                       },
+                       None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
+               }
        }
 
        /// Returns information for the direction.
        #[inline]
        pub(super) fn direction(&self) -> &'a ChannelUpdateInfo { self.direction }
+
+       /// Returns the `node_id` of the source hop.
+       ///
+       /// Refers to the `node_id` forwarding the payment to the next hop.
+       #[inline]
+       pub(super) fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
+
+       /// Returns the `node_id` of the target hop.
+       ///
+       /// Refers to the `node_id` receiving the payment from the previous hop.
+       #[inline]
+       pub(super) fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
 }
 
 impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
index a2d9f51a38e6a4edc2a8e64c35798c5184e1c5ca..f79da5ee9ec2d5b4340837cd486b1f59678c74b7 100644 (file)
@@ -9,18 +9,21 @@
 
 //! The router finds paths within a [`NetworkGraph`] for a payment.
 
-use bitcoin::secp256k1::PublicKey;
+use bitcoin::secp256k1::{PublicKey, Secp256k1, self};
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
 
 use crate::blinded_path::{BlindedHop, BlindedPath};
+use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
 use crate::ln::PaymentHash;
 use crate::ln::channelmanager::{ChannelDetails, PaymentId};
-use crate::ln::features::{Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelFeatures, NodeFeatures};
+use crate::ln::features::{BlindedHopFeatures, Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelFeatures, NodeFeatures};
 use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
+use crate::onion_message::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
 use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId, RoutingFees};
 use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp};
+use crate::sign::EntropySource;
 use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer};
 use crate::util::logger::{Level, Logger};
 use crate::util::chacha20::ChaCha20;
@@ -33,7 +36,7 @@ use core::{cmp, fmt};
 use core::ops::Deref;
 
 /// A [`Router`] implemented using [`find_route`].
-pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
+pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
        L::Target: Logger,
        S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
@@ -41,21 +44,23 @@ pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref,
        logger: L,
        random_seed_bytes: Mutex<[u8; 32]>,
        scorer: S,
-       score_params: SP
+       score_params: SP,
+       message_router: DefaultMessageRouter<G, L>,
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
        L::Target: Logger,
        S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
        /// Creates a new router.
        pub fn new(network_graph: G, logger: L, random_seed_bytes: [u8; 32], scorer: S, score_params: SP) -> Self {
                let random_seed_bytes = Mutex::new(random_seed_bytes);
-               Self { network_graph, logger, random_seed_bytes, scorer, score_params }
+               let message_router = DefaultMessageRouter::new(network_graph.clone());
+               Self { network_graph, logger, random_seed_bytes, scorer, score_params, message_router }
        }
 }
 
-impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
        L::Target: Logger,
        S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
@@ -78,10 +83,109 @@ impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Sco
                        &random_seed_bytes
                )
        }
+
+       fn create_blinded_payment_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
+               amount_msats: u64, entropy_source: &ES, secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+               // Limit the number of blinded paths that are computed.
+               const MAX_PAYMENT_PATHS: usize = 3;
+
+               // Ensure peers have at least three channels so that it is more difficult to infer the
+               // recipient's node_id.
+               const MIN_PEER_CHANNELS: usize = 3;
+
+               let network_graph = self.network_graph.deref().read_only();
+               let paths = first_hops.into_iter()
+                       .filter(|details| details.counterparty.features.supports_route_blinding())
+                       .filter(|details| amount_msats <= details.inbound_capacity_msat)
+                       .filter(|details| amount_msats >= details.inbound_htlc_minimum_msat.unwrap_or(0))
+                       .filter(|details| amount_msats <= details.inbound_htlc_maximum_msat.unwrap_or(u64::MAX))
+                       .filter(|details| network_graph
+                                       .node(&NodeId::from_pubkey(&details.counterparty.node_id))
+                                       .map(|node_info| node_info.channels.len() >= MIN_PEER_CHANNELS)
+                                       .unwrap_or(false)
+                       )
+                       .filter_map(|details| {
+                               let short_channel_id = match details.get_inbound_payment_scid() {
+                                       Some(short_channel_id) => short_channel_id,
+                                       None => return None,
+                               };
+                               let payment_relay: PaymentRelay = match details.counterparty.forwarding_info {
+                                       Some(forwarding_info) => forwarding_info.into(),
+                                       None => return None,
+                               };
+
+                               // Avoid exposing esoteric CLTV expiry deltas
+                               let cltv_expiry_delta = match payment_relay.cltv_expiry_delta {
+                                       0..=40 => 40u32,
+                                       41..=80 => 80u32,
+                                       81..=144 => 144u32,
+                                       145..=216 => 216u32,
+                                       _ => return None,
+                               };
+
+                               let payment_constraints = PaymentConstraints {
+                                       max_cltv_expiry: tlvs.payment_constraints.max_cltv_expiry + cltv_expiry_delta,
+                                       htlc_minimum_msat: details.inbound_htlc_minimum_msat.unwrap_or(0),
+                               };
+                               Some(ForwardNode {
+                                       tlvs: ForwardTlvs {
+                                               short_channel_id,
+                                               payment_relay,
+                                               payment_constraints,
+                                               features: BlindedHopFeatures::empty(),
+                                       },
+                                       node_id: details.counterparty.node_id,
+                                       htlc_maximum_msat: details.inbound_htlc_maximum_msat.unwrap_or(u64::MAX),
+                               })
+                       })
+                       .map(|forward_node| {
+                               BlindedPath::new_for_payment(
+                                       &[forward_node], recipient, tlvs.clone(), u64::MAX, entropy_source, secp_ctx
+                               )
+                       })
+                       .take(MAX_PAYMENT_PATHS)
+                       .collect::<Result<Vec<_>, _>>();
+
+               match paths {
+                       Ok(paths) if !paths.is_empty() => Ok(paths),
+                       _ => {
+                               if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) {
+                                       BlindedPath::one_hop_for_payment(recipient, tlvs, entropy_source, secp_ctx)
+                                               .map(|path| vec![path])
+                               } else {
+                                       Err(())
+                               }
+                       },
+               }
+       }
+}
+
+impl< G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> MessageRouter for DefaultRouter<G, L, S, SP, Sc> where
+       L::Target: Logger,
+       S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
+{
+       fn find_path(
+               &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
+       ) -> Result<OnionMessagePath, ()> {
+               self.message_router.find_path(sender, peers, destination)
+       }
+
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+               secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()> {
+               self.message_router.create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+       }
 }
 
 /// A trait defining behavior for routing a payment.
-pub trait Router {
+pub trait Router: MessageRouter {
        /// Finds a [`Route`] for a payment between the given `payer` and a payee.
        ///
        /// The `payee` and the payment's value are given in [`RouteParameters::payment_params`]
@@ -105,6 +209,16 @@ pub trait Router {
        ) -> Result<Route, LightningError> {
                self.find_route(payer, route_params, first_hops, inflight_htlcs)
        }
+
+       /// Creates [`BlindedPath`]s for payment to the `recipient` node. The channels in `first_hops`
+       /// are assumed to be with the `recipient`'s peers. The payment secret and any constraints are
+       /// given in `tlvs`.
+       fn create_blinded_payment_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
+               amount_msats: u64, entropy_source: &ES, secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()>;
 }
 
 /// [`ScoreLookUp`] implementation that factors in in-flight HTLC liquidity.
@@ -130,18 +244,27 @@ impl<'a, S: Deref> ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: Scor
 
 impl<'a, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp {
        type ScoreParams = <S::Target as ScoreLookUp>::ScoreParams;
-       fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
+       fn channel_penalty_msat(&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
+               let target = match candidate.target() {
+                       Some(target) => target,
+                       None => return self.scorer.channel_penalty_msat(candidate, usage, score_params),
+               };
+               let short_channel_id = match candidate.short_channel_id() {
+                       Some(short_channel_id) => short_channel_id,
+                       None => return self.scorer.channel_penalty_msat(candidate, usage, score_params),
+               };
+               let source = candidate.source();
                if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat(
-                       source, target, short_channel_id
+                       &source, &target, short_channel_id
                ) {
                        let usage = ChannelUsage {
                                inflight_htlc_msat: usage.inflight_htlc_msat.saturating_add(used_liquidity),
                                ..usage
                        };
 
-                       self.scorer.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
+                       self.scorer.channel_penalty_msat(candidate, usage, score_params)
                } else {
-                       self.scorer.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
+                       self.scorer.channel_penalty_msat(candidate, usage, score_params)
                }
        }
 }
@@ -929,6 +1052,10 @@ impl Readable for RouteHint {
 }
 
 /// A channel descriptor for a hop along a payment path.
+///
+/// While this generally comes from BOLT 11's `r` field, this struct includes more fields than are
+/// available in BOLT 11. Thus, encoding and decoding this via `lightning-invoice` is lossy, as
+/// fields not supported in BOLT 11 will be stripped.
 #[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
 pub struct RouteHintHop {
        /// The node_id of the non-target end of the route
@@ -955,33 +1082,24 @@ impl_writeable_tlv_based!(RouteHintHop, {
 });
 
 #[derive(Eq, PartialEq)]
+#[repr(align(64))] // Force the size to 64 bytes
 struct RouteGraphNode {
        node_id: NodeId,
-       lowest_fee_to_node: u64,
-       total_cltv_delta: u32,
+       score: u64,
        // The maximum value a yet-to-be-constructed payment path might flow through this node.
        // This value is upper-bounded by us by:
        // - how much is needed for a path being constructed
        // - how much value can channels following this node (up to the destination) can contribute,
        //   considering their capacity and fees
        value_contribution_msat: u64,
-       /// The effective htlc_minimum_msat at this hop. If a later hop on the path had a higher HTLC
-       /// minimum, we use it, plus the fees required at each earlier hop to meet it.
-       path_htlc_minimum_msat: u64,
-       /// All penalties incurred from this hop on the way to the destination, as calculated using
-       /// channel scoring.
-       path_penalty_msat: u64,
+       total_cltv_delta: u32,
        /// The number of hops walked up to this node.
        path_length_to_node: u8,
 }
 
 impl cmp::Ord for RouteGraphNode {
        fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering {
-               let other_score = cmp::max(other.lowest_fee_to_node, other.path_htlc_minimum_msat)
-                       .saturating_add(other.path_penalty_msat);
-               let self_score = cmp::max(self.lowest_fee_to_node, self.path_htlc_minimum_msat)
-                       .saturating_add(self.path_penalty_msat);
-               other_score.cmp(&self_score).then_with(|| other.node_id.cmp(&self.node_id))
+               other.score.cmp(&self.score).then_with(|| other.node_id.cmp(&self.node_id))
        }
 }
 
@@ -991,133 +1109,302 @@ impl cmp::PartialOrd for RouteGraphNode {
        }
 }
 
+// While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved
+// substantially when it is laid out at exactly 64 bytes.
+//
+// Thus, we use `#[repr(align(64))]` on the struct to force a 64-byte layout and check that it stays 64
+// bytes here.
+#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
+const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::<RouteGraphNode>();
+#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
+const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::<RouteGraphNode>() - 64;
+
+/// A [`CandidateRouteHop::FirstHop`] entry.
+#[derive(Clone, Debug)]
+pub struct FirstHopCandidate<'a> {
+       /// Channel details of the first hop
+       ///
+       /// [`ChannelDetails::get_outbound_payment_scid`] MUST be `Some` (indicating the channel
+       /// has been funded and is able to pay), and accessor methods may panic otherwise.
+       ///
+       /// [`find_route`] validates this prior to constructing a [`CandidateRouteHop`].
+       pub details: &'a ChannelDetails,
+       /// The node id of the payer, which is also the source side of this candidate route hop.
+       pub payer_node_id: &'a NodeId,
+}
+
+/// A [`CandidateRouteHop::PublicHop`] entry.
+#[derive(Clone, Debug)]
+pub struct PublicHopCandidate<'a> {
+       /// Information about the channel, including potentially its capacity and
+       /// direction-specific information.
+       pub info: DirectedChannelInfo<'a>,
+       /// The short channel ID of the channel, i.e. the identifier by which we refer to this
+       /// channel.
+       pub short_channel_id: u64,
+}
+
+/// A [`CandidateRouteHop::PrivateHop`] entry.
+#[derive(Clone, Debug)]
+pub struct PrivateHopCandidate<'a> {
+       /// Information about the private hop communicated via BOLT 11.
+       pub hint: &'a RouteHintHop,
+       /// Node id of the next hop in BOLT 11 route hint.
+       pub target_node_id: &'a NodeId
+}
+
+/// A [`CandidateRouteHop::Blinded`] entry.
+#[derive(Clone, Debug)]
+pub struct BlindedPathCandidate<'a> {
+       /// Information about the blinded path including the fee, HTLC amount limits, and
+       /// cryptographic material required to build an HTLC through the given path.
+       pub hint: &'a (BlindedPayInfo, BlindedPath),
+       /// Index of the hint in the original list of blinded hints.
+       ///
+       /// This is used to cheaply uniquely identify this blinded path, even though we don't have
+       /// a short channel ID for this hop.
+       hint_idx: usize,
+}
+
+/// A [`CandidateRouteHop::OneHopBlinded`] entry.
+#[derive(Clone, Debug)]
+pub struct OneHopBlindedPathCandidate<'a> {
+       /// Information about the blinded path including the fee, HTLC amount limits, and
+       /// cryptographic material required to build an HTLC terminating with the given path.
+       ///
+       /// Note that the [`BlindedPayInfo`] is ignored here.
+       pub hint: &'a (BlindedPayInfo, BlindedPath),
+       /// Index of the hint in the original list of blinded hints.
+       ///
+       /// This is used to cheaply uniquely identify this blinded path, even though we don't have
+       /// a short channel ID for this hop.
+       hint_idx: usize,
+}
+
 /// A wrapper around the various hop representations.
 ///
-/// Used to construct a [`PathBuildingHop`] and to estimate [`EffectiveCapacity`].
+/// Can be used to examine the properties of a hop,
+/// potentially to decide whether to include it in a route.
 #[derive(Clone, Debug)]
-enum CandidateRouteHop<'a> {
+pub enum CandidateRouteHop<'a> {
        /// A hop from the payer, where the outbound liquidity is known.
-       FirstHop {
-               details: &'a ChannelDetails,
-       },
-       /// A hop found in the [`ReadOnlyNetworkGraph`], where the channel capacity may be unknown.
-       PublicHop {
-               info: DirectedChannelInfo<'a>,
-               short_channel_id: u64,
-       },
-       /// A hop to the payee found in the BOLT 11 payment invoice, though not necessarily a direct
-       /// channel.
-       PrivateHop {
-               hint: &'a RouteHintHop,
-       },
-       /// The payee's identity is concealed behind blinded paths provided in a BOLT 12 invoice.
-       Blinded {
-               hint: &'a (BlindedPayInfo, BlindedPath),
-               hint_idx: usize,
-       },
-       /// Similar to [`Self::Blinded`], but the path here has 1 blinded hop. `BlindedPayInfo` provided
-       /// for 1-hop blinded paths is ignored because it is meant to apply to the hops *between* the
-       /// introduction node and the destination. Useful for tracking that we need to include a blinded
-       /// path at the end of our [`Route`].
-       OneHopBlinded {
-               hint: &'a (BlindedPayInfo, BlindedPath),
-               hint_idx: usize,
-       },
+       FirstHop(FirstHopCandidate<'a>),
+       /// A hop found in the [`ReadOnlyNetworkGraph`].
+       PublicHop(PublicHopCandidate<'a>),
+       /// A private hop communicated by the payee, generally via a BOLT 11 invoice.
+       ///
+       /// Because BOLT 11 route hints can take multiple hops to get to the destination, this may not
+       /// terminate at the payee.
+       PrivateHop(PrivateHopCandidate<'a>),
+       /// A blinded path which starts with an introduction point and ultimately terminates with the
+       /// payee.
+       ///
+       /// Because we don't know the payee's identity, [`CandidateRouteHop::target`] will return
+       /// `None` in this state.
+       ///
+       /// Because blinded paths are "all or nothing", and we cannot use just one part of a blinded
+       /// path, the full path is treated as a single [`CandidateRouteHop`].
+       Blinded(BlindedPathCandidate<'a>),
+       /// Similar to [`Self::Blinded`], but the path here only has one hop.
+       ///
+       /// While we treat this similarly to [`CandidateRouteHop::Blinded`] in many respects (e.g.
+       /// returning `None` from [`CandidateRouteHop::target`]), in this case we do actually know the
+       /// payee's identity - it's the introduction point!
+       ///
+       /// [`BlindedPayInfo`] provided for 1-hop blinded paths is ignored because it is meant to apply
+       /// to the hops *between* the introduction node and the destination.
+       ///
+       /// This primarily exists to track that we need to include a blinded path at the end of our
+       /// [`Route`], even though it doesn't actually add an additional hop in the payment.
+       OneHopBlinded(OneHopBlindedPathCandidate<'a>),
 }
 
 impl<'a> CandidateRouteHop<'a> {
+       /// Returns the short channel ID for this hop, if one is known.
+       ///
+       /// This SCID could be an alias or a globally unique SCID, and thus is only expected to
+       /// uniquely identify this channel in conjunction with the [`CandidateRouteHop::source`].
+       ///
+       /// Returns `Some` as long as the candidate is a [`CandidateRouteHop::PublicHop`], a
+       /// [`CandidateRouteHop::PrivateHop`] from a BOLT 11 route hint, or a
+       /// [`CandidateRouteHop::FirstHop`] with a known [`ChannelDetails::get_outbound_payment_scid`]
+       /// (which is always true for channels which are funded and ready for use).
+       ///
+       /// In other words, this should always return `Some` as long as the candidate hop is not a
+       /// [`CandidateRouteHop::Blinded`] or a [`CandidateRouteHop::OneHopBlinded`].
+       ///
+       /// Note that this is deliberately not public as it is somewhat of a footgun because it doesn't
+       /// define a global namespace.
+       #[inline]
        fn short_channel_id(&self) -> Option<u64> {
                match self {
-                       CandidateRouteHop::FirstHop { details } => Some(details.get_outbound_payment_scid().unwrap()),
-                       CandidateRouteHop::PublicHop { short_channel_id, .. } => Some(*short_channel_id),
-                       CandidateRouteHop::PrivateHop { hint } => Some(hint.short_channel_id),
-                       CandidateRouteHop::Blinded { .. } => None,
-                       CandidateRouteHop::OneHopBlinded { .. } => None,
+                       CandidateRouteHop::FirstHop(hop) => hop.details.get_outbound_payment_scid(),
+                       CandidateRouteHop::PublicHop(hop) => Some(hop.short_channel_id),
+                       CandidateRouteHop::PrivateHop(hop) => Some(hop.hint.short_channel_id),
+                       CandidateRouteHop::Blinded(_) => None,
+                       CandidateRouteHop::OneHopBlinded(_) => None,
+               }
+       }
+
+       /// Returns the globally unique short channel ID for this hop, if one is known.
+       ///
+       /// This only returns `Some` if the channel is public (either our own, or one we've learned
+       /// from the public network graph), and thus the short channel ID we have for this channel is
+       /// globally unique and identifies this channel in a global namespace.
+       #[inline]
+       pub fn globally_unique_short_channel_id(&self) -> Option<u64> {
+               match self {
+                       CandidateRouteHop::FirstHop(hop) => if hop.details.is_public { hop.details.short_channel_id } else { None },
+                       CandidateRouteHop::PublicHop(hop) => Some(hop.short_channel_id),
+                       CandidateRouteHop::PrivateHop(_) => None,
+                       CandidateRouteHop::Blinded(_) => None,
+                       CandidateRouteHop::OneHopBlinded(_) => None,
                }
        }
 
        // NOTE: This may alloc memory so avoid calling it in a hot code path.
        fn features(&self) -> ChannelFeatures {
                match self {
-                       CandidateRouteHop::FirstHop { details } => details.counterparty.features.to_context(),
-                       CandidateRouteHop::PublicHop { info, .. } => info.channel().features.clone(),
-                       CandidateRouteHop::PrivateHop { .. } => ChannelFeatures::empty(),
-                       CandidateRouteHop::Blinded { .. } => ChannelFeatures::empty(),
-                       CandidateRouteHop::OneHopBlinded { .. } => ChannelFeatures::empty(),
+                       CandidateRouteHop::FirstHop(hop) => hop.details.counterparty.features.to_context(),
+                       CandidateRouteHop::PublicHop(hop) => hop.info.channel().features.clone(),
+                       CandidateRouteHop::PrivateHop(_) => ChannelFeatures::empty(),
+                       CandidateRouteHop::Blinded(_) => ChannelFeatures::empty(),
+                       CandidateRouteHop::OneHopBlinded(_) => ChannelFeatures::empty(),
                }
        }
 
-       fn cltv_expiry_delta(&self) -> u32 {
+       /// Returns the required difference in HTLC CLTV expiry between the [`Self::source`] and the
+       /// next-hop for an HTLC taking this hop.
+       ///
+       /// This is the time that the node(s) in this hop have to claim the HTLC on-chain if the
+       /// next-hop goes on chain with a payment preimage.
+       #[inline]
+       pub fn cltv_expiry_delta(&self) -> u32 {
                match self {
-                       CandidateRouteHop::FirstHop { .. } => 0,
-                       CandidateRouteHop::PublicHop { info, .. } => info.direction().cltv_expiry_delta as u32,
-                       CandidateRouteHop::PrivateHop { hint } => hint.cltv_expiry_delta as u32,
-                       CandidateRouteHop::Blinded { hint, .. } => hint.0.cltv_expiry_delta as u32,
-                       CandidateRouteHop::OneHopBlinded { .. } => 0,
+                       CandidateRouteHop::FirstHop(_) => 0,
+                       CandidateRouteHop::PublicHop(hop) => hop.info.direction().cltv_expiry_delta as u32,
+                       CandidateRouteHop::PrivateHop(hop) => hop.hint.cltv_expiry_delta as u32,
+                       CandidateRouteHop::Blinded(hop) => hop.hint.0.cltv_expiry_delta as u32,
+                       CandidateRouteHop::OneHopBlinded(_) => 0,
                }
        }
 
-       fn htlc_minimum_msat(&self) -> u64 {
+       /// Returns the minimum amount that can be sent over this hop, in millisatoshis.
+       #[inline]
+       pub fn htlc_minimum_msat(&self) -> u64 {
                match self {
-                       CandidateRouteHop::FirstHop { details } => details.next_outbound_htlc_minimum_msat,
-                       CandidateRouteHop::PublicHop { info, .. } => info.direction().htlc_minimum_msat,
-                       CandidateRouteHop::PrivateHop { hint } => hint.htlc_minimum_msat.unwrap_or(0),
-                       CandidateRouteHop::Blinded { hint, .. } => hint.0.htlc_minimum_msat,
+                       CandidateRouteHop::FirstHop(hop) => hop.details.next_outbound_htlc_minimum_msat,
+                       CandidateRouteHop::PublicHop(hop) => hop.info.direction().htlc_minimum_msat,
+                       CandidateRouteHop::PrivateHop(hop) => hop.hint.htlc_minimum_msat.unwrap_or(0),
+                       CandidateRouteHop::Blinded(hop) => hop.hint.0.htlc_minimum_msat,
                        CandidateRouteHop::OneHopBlinded { .. } => 0,
                }
        }
 
-       fn fees(&self) -> RoutingFees {
+       /// Returns the fees that must be paid to route an HTLC over this channel.
+       #[inline]
+       pub fn fees(&self) -> RoutingFees {
                match self {
-                       CandidateRouteHop::FirstHop { .. } => RoutingFees {
+                       CandidateRouteHop::FirstHop(_) => RoutingFees {
                                base_msat: 0, proportional_millionths: 0,
                        },
-                       CandidateRouteHop::PublicHop { info, .. } => info.direction().fees,
-                       CandidateRouteHop::PrivateHop { hint } => hint.fees,
-                       CandidateRouteHop::Blinded { hint, .. } => {
+                       CandidateRouteHop::PublicHop(hop) => hop.info.direction().fees,
+                       CandidateRouteHop::PrivateHop(hop) => hop.hint.fees,
+                       CandidateRouteHop::Blinded(hop) => {
                                RoutingFees {
-                                       base_msat: hint.0.fee_base_msat,
-                                       proportional_millionths: hint.0.fee_proportional_millionths
+                                       base_msat: hop.hint.0.fee_base_msat,
+                                       proportional_millionths: hop.hint.0.fee_proportional_millionths
                                }
                        },
-                       CandidateRouteHop::OneHopBlinded { .. } =>
+                       CandidateRouteHop::OneHopBlinded(_) =>
                                RoutingFees { base_msat: 0, proportional_millionths: 0 },
                }
        }
 
+       /// Fetch the effective capacity of this hop.
+       ///
+       /// Note that this may be somewhat expensive, so calls to this should be limited and results
+       /// cached!
        fn effective_capacity(&self) -> EffectiveCapacity {
                match self {
-                       CandidateRouteHop::FirstHop { details } => EffectiveCapacity::ExactLiquidity {
-                               liquidity_msat: details.next_outbound_htlc_limit_msat,
+                       CandidateRouteHop::FirstHop(hop) => EffectiveCapacity::ExactLiquidity {
+                               liquidity_msat: hop.details.next_outbound_htlc_limit_msat,
                        },
-                       CandidateRouteHop::PublicHop { info, .. } => info.effective_capacity(),
-                       CandidateRouteHop::PrivateHop { hint: RouteHintHop { htlc_maximum_msat: Some(max), .. }} =>
+                       CandidateRouteHop::PublicHop(hop) => hop.info.effective_capacity(),
+                       CandidateRouteHop::PrivateHop(PrivateHopCandidate { hint: RouteHintHop { htlc_maximum_msat: Some(max), .. }, .. }) =>
                                EffectiveCapacity::HintMaxHTLC { amount_msat: *max },
-                       CandidateRouteHop::PrivateHop { hint: RouteHintHop { htlc_maximum_msat: None, .. }} =>
+                       CandidateRouteHop::PrivateHop(PrivateHopCandidate { hint: RouteHintHop { htlc_maximum_msat: None, .. }, .. }) =>
                                EffectiveCapacity::Infinite,
-                       CandidateRouteHop::Blinded { hint, .. } =>
-                               EffectiveCapacity::HintMaxHTLC { amount_msat: hint.0.htlc_maximum_msat },
-                       CandidateRouteHop::OneHopBlinded { .. } => EffectiveCapacity::Infinite,
+                       CandidateRouteHop::Blinded(hop) =>
+                               EffectiveCapacity::HintMaxHTLC { amount_msat: hop.hint.0.htlc_maximum_msat },
+                       CandidateRouteHop::OneHopBlinded(_) => EffectiveCapacity::Infinite,
                }
        }
 
-       fn id(&self, channel_direction: bool /* src_node_id < target_node_id */) -> CandidateHopId {
+       /// Returns an ID describing the given hop.
+       ///
+       /// See the docs on [`CandidateHopId`] for when this is, or is not, unique.
+       #[inline]
+       fn id(&self) -> CandidateHopId {
                match self {
-                       CandidateRouteHop::Blinded { hint_idx, .. } => CandidateHopId::Blinded(*hint_idx),
-                       CandidateRouteHop::OneHopBlinded { hint_idx, .. } => CandidateHopId::Blinded(*hint_idx),
-                       _ => CandidateHopId::Clear((self.short_channel_id().unwrap(), channel_direction)),
+                       CandidateRouteHop::Blinded(hop) => CandidateHopId::Blinded(hop.hint_idx),
+                       CandidateRouteHop::OneHopBlinded(hop) => CandidateHopId::Blinded(hop.hint_idx),
+                       _ => CandidateHopId::Clear((self.short_channel_id().unwrap(), self.source() < self.target().unwrap())),
                }
        }
        fn blinded_path(&self) -> Option<&'a BlindedPath> {
                match self {
-                       CandidateRouteHop::Blinded { hint, .. } | CandidateRouteHop::OneHopBlinded { hint, .. } => {
+                       CandidateRouteHop::Blinded(BlindedPathCandidate { hint, .. }) | CandidateRouteHop::OneHopBlinded(OneHopBlindedPathCandidate { hint, .. }) => {
                                Some(&hint.1)
                        },
                        _ => None,
                }
        }
+       /// Returns the source node id of current hop.
+       ///
+       /// Source node id refers to the node forwarding the HTLC through this hop.
+       ///
+       /// For [`Self::FirstHop`] we return payer's node id.
+       #[inline]
+       pub fn source(&self) -> NodeId {
+               match self {
+                       CandidateRouteHop::FirstHop(hop) => *hop.payer_node_id,
+                       CandidateRouteHop::PublicHop(hop) => *hop.info.source(),
+                       CandidateRouteHop::PrivateHop(hop) => hop.hint.src_node_id.into(),
+                       CandidateRouteHop::Blinded(hop) => hop.hint.1.introduction_node_id.into(),
+                       CandidateRouteHop::OneHopBlinded(hop) => hop.hint.1.introduction_node_id.into(),
+               }
+       }
+       /// Returns the target node id of this hop, if known.
+       ///
+       /// Target node id refers to the node receiving the HTLC after this hop.
+       ///
+       /// For [`Self::Blinded`] we return `None` because the ultimate destination after the blinded
+       /// path is unknown.
+       ///
+       /// For [`Self::OneHopBlinded`] we return `None` because the target is the same as the source,
+       /// and such a return value would be somewhat nonsensical.
+       #[inline]
+       pub fn target(&self) -> Option<NodeId> {
+               match self {
+                       CandidateRouteHop::FirstHop(hop) => Some(hop.details.counterparty.node_id.into()),
+                       CandidateRouteHop::PublicHop(hop) => Some(*hop.info.target()),
+                       CandidateRouteHop::PrivateHop(hop) => Some(*hop.target_node_id),
+                       CandidateRouteHop::Blinded(_) => None,
+                       CandidateRouteHop::OneHopBlinded(_) => None,
+               }
+       }
 }
 
+/// A unique(ish) identifier for a specific [`CandidateRouteHop`].
+///
+/// For blinded paths, this ID is unique only within a given [`find_route`] call.
+///
+/// For other hops, because SCIDs between private channels and public channels can conflict, this
+/// isn't guaranteed to be unique at all.
+///
+/// For our uses, this is generally fine, but it is not public as it is otherwise a rather
+/// difficult-to-use API.
 #[derive(Clone, Copy, Eq, Hash, Ord, PartialOrd, PartialEq)]
 enum CandidateHopId {
        /// Contains (scid, src_node_id < target_node_id)
@@ -1159,18 +1446,15 @@ fn iter_equal<I1: Iterator, I2: Iterator>(mut iter_a: I1, mut iter_b: I2)
 /// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
 /// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
 #[derive(Clone)]
+#[repr(C)] // Force fields to appear in the order we define them.
 struct PathBuildingHop<'a> {
-       // Note that this should be dropped in favor of loading it from CandidateRouteHop, but doing so
-       // is a larger refactor and will require careful performance analysis.
-       node_id: NodeId,
        candidate: CandidateRouteHop<'a>,
-       fee_msat: u64,
-
-       /// All the fees paid *after* this channel on the way to the destination
-       next_hops_fee_msat: u64,
-       /// Fee paid for the use of the current channel (see candidate.fees()).
-       /// The value will be actually deducted from the counterparty balance on the previous link.
-       hop_use_fee_msat: u64,
+       /// If we've already processed a node as the best node, we shouldn't process it again. Normally
+       /// we'd just ignore it if we did as all channels would have a higher new fee, but because we
+       /// may decrease the amounts in use as we walk the graph, the actual calculated fee may
+       /// decrease as well. Thus, we have to explicitly track which nodes have been processed and
+       /// avoid processing them again.
+       was_processed: bool,
        /// Used to compare channels when choosing the for routing.
        /// Includes paying for the use of a hop and the following hops, as well as
        /// an estimated cost of reaching this hop.
@@ -1182,12 +1466,20 @@ struct PathBuildingHop<'a> {
        /// All penalties incurred from this channel on the way to the destination, as calculated using
        /// channel scoring.
        path_penalty_msat: u64,
-       /// If we've already processed a node as the best node, we shouldn't process it again. Normally
-       /// we'd just ignore it if we did as all channels would have a higher new fee, but because we
-       /// may decrease the amounts in use as we walk the graph, the actual calculated fee may
-       /// decrease as well. Thus, we have to explicitly track which nodes have been processed and
-       /// avoid processing them again.
-       was_processed: bool,
+
+       // The last 16 bytes are on the next cache line by default in glibc's malloc. Thus, we should
+       // only place fields which are not hot there. Luckily, the next three fields are only read if
+       // we end up on the selected path, and only in the final path layout phase, so we don't care
+       // too much if reading them is slow.
+
+       fee_msat: u64,
+
+       /// All the fees paid *after* this channel on the way to the destination
+       next_hops_fee_msat: u64,
+       /// Fee paid for the use of the current channel (see candidate.fees()).
+       /// The value will be actually deducted from the counterparty balance on the previous link.
+       hop_use_fee_msat: u64,
+
        #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
        // In tests, we apply further sanity checks on cases where we skip nodes we already processed
        // to ensure it is specifically in cases where the fee has gone down because of a decrease in
@@ -1196,11 +1488,23 @@ struct PathBuildingHop<'a> {
        value_contribution_msat: u64,
 }
 
+// Checks that the entries in the `find_route` `dist` map fit in (exactly) two standard x86-64
+// cache lines. Sadly, they're not guaranteed to actually lie on a cache line (and in fact,
+// generally won't, because at least glibc's malloc will align to a nice, big, round
+// boundary...plus 16), but at least it will reduce the amount of data we'll need to load.
+//
+// Note that these assertions only pass on somewhat recent rustc, and thus are gated on the
+// ldk_bench flag.
+#[cfg(ldk_bench)]
+const _NODE_MAP_SIZE_TWO_CACHE_LINES: usize = 128 - core::mem::size_of::<(NodeId, PathBuildingHop)>();
+#[cfg(ldk_bench)]
+const _NODE_MAP_SIZE_EXACTLY_CACHE_LINES: usize = core::mem::size_of::<(NodeId, PathBuildingHop)>() - 128;
+
 impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
                let mut debug_struct = f.debug_struct("PathBuildingHop");
                debug_struct
-                       .field("node_id", &self.node_id)
+                       .field("node_id", &self.candidate.target())
                        .field("short_channel_id", &self.candidate.short_channel_id())
                        .field("total_fee_msat", &self.total_fee_msat)
                        .field("next_hops_fee_msat", &self.next_hops_fee_msat)
@@ -1382,17 +1686,17 @@ struct LoggedCandidateHop<'a>(&'a CandidateRouteHop<'a>);
 impl<'a> fmt::Display for LoggedCandidateHop<'a> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                match self.0 {
-                       CandidateRouteHop::Blinded { hint, .. } | CandidateRouteHop::OneHopBlinded { hint, .. } => {
+                       CandidateRouteHop::Blinded(BlindedPathCandidate { hint, .. }) | CandidateRouteHop::OneHopBlinded(OneHopBlindedPathCandidate { hint, .. }) => {
                                "blinded route hint with introduction node id ".fmt(f)?;
                                hint.1.introduction_node_id.fmt(f)?;
                                " and blinding point ".fmt(f)?;
                                hint.1.blinding_point.fmt(f)
                        },
-                       CandidateRouteHop::FirstHop { .. } => {
+                       CandidateRouteHop::FirstHop(_) => {
                                "first hop with SCID ".fmt(f)?;
                                self.0.short_channel_id().unwrap().fmt(f)
                        },
-                       CandidateRouteHop::PrivateHop { .. } => {
+                       CandidateRouteHop::PrivateHop(_) => {
                                "route hint with SCID ".fmt(f)?;
                                self.0.short_channel_id().unwrap().fmt(f)
                        },
@@ -1650,6 +1954,20 @@ where L::Target: Logger {
                }
        }
 
+       let mut private_hop_key_cache = HashMap::with_capacity(
+               payment_params.payee.unblinded_route_hints().iter().map(|path| path.0.len()).sum()
+       );
+
+       // Because we store references to private hop node_ids in `dist`, below, we need them to exist
+       // (as `NodeId`, not `PublicKey`) for the lifetime of `dist`. Thus, we calculate all the keys
+       // we'll need here and simply fetch them when routing.
+       private_hop_key_cache.insert(maybe_dummy_payee_pk, NodeId::from_pubkey(&maybe_dummy_payee_pk));
+       for route in payment_params.payee.unblinded_route_hints().iter() {
+               for hop in route.0.iter() {
+                       private_hop_key_cache.insert(hop.src_node_id, NodeId::from_pubkey(&hop.src_node_id));
+               }
+       }
+
        // The main heap containing all candidate next-hops sorted by their score (max(fee,
        // htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of
        // adding duplicate entries when we find a better path to a given node.
@@ -1722,11 +2040,11 @@ where L::Target: Logger {
        let mut num_ignored_htlc_minimum_msat_limit: u32 = 0;
 
        macro_rules! add_entry {
-               // Adds entry which goes from $src_node_id to $dest_node_id over the $candidate hop.
+               // Adds entry which goes from $candidate.source() to $candidate.target() over the $candidate hop.
                // $next_hops_fee_msat represents the fees paid for using all the channels *after* this one,
                // since that value has to be transferred over this channel.
                // Returns the contribution amount of $candidate if the channel caused an update to `targets`.
-               ( $candidate: expr, $src_node_id: expr, $dest_node_id: expr, $next_hops_fee_msat: expr,
+               ( $candidate: expr, $next_hops_fee_msat: expr,
                        $next_hops_value_contribution: expr, $next_hops_path_htlc_minimum_msat: expr,
                        $next_hops_path_penalty_msat: expr, $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => { {
                        // We "return" whether we updated the path at the end, and how much we can route via
@@ -1736,7 +2054,8 @@ where L::Target: Logger {
                        // practice these cases should be caught earlier:
                        // - for regular channels at channel announcement (TODO)
                        // - for first and last hops early in get_route
-                       if $src_node_id != $dest_node_id {
+                       let src_node_id = $candidate.source();
+                       if Some(src_node_id) != $candidate.target() {
                                let scid_opt = $candidate.short_channel_id();
                                let effective_capacity = $candidate.effective_capacity();
                                let htlc_maximum_msat = max_htlc_from_capacity(effective_capacity, channel_saturation_pow_half);
@@ -1750,7 +2069,7 @@ where L::Target: Logger {
                                // We do this for now, but this is a subject for removal.
                                if let Some(mut available_value_contribution_msat) = htlc_maximum_msat.checked_sub($next_hops_fee_msat) {
                                        let used_liquidity_msat = used_liquidities
-                                               .get(&$candidate.id($src_node_id < $dest_node_id))
+                                               .get(&$candidate.id())
                                                .map_or(0, |used_liquidity_msat| {
                                                        available_value_contribution_msat = available_value_contribution_msat
                                                                .saturating_sub(*used_liquidity_msat);
@@ -1795,12 +2114,12 @@ where L::Target: Logger {
                                        let payment_failed_on_this_channel = scid_opt.map_or(false,
                                                |scid| payment_params.previously_failed_channels.contains(&scid));
 
-                                       let should_log_candidate = match $candidate {
-                                               CandidateRouteHop::FirstHop { .. } => true,
-                                               CandidateRouteHop::PrivateHop { .. } => true,
-                                               CandidateRouteHop::Blinded { .. } => true,
-                                               CandidateRouteHop::OneHopBlinded { .. } => true,
-                                               _ => false,
+                                       let (should_log_candidate, first_hop_details) = match $candidate {
+                                               CandidateRouteHop::FirstHop(hop) => (true, Some(hop.details)),
+                                               CandidateRouteHop::PrivateHop(_) => (true, None),
+                                               CandidateRouteHop::Blinded(_) => (true, None),
+                                               CandidateRouteHop::OneHopBlinded(_) => (true, None),
+                                               _ => (false, None),
                                        };
 
                                        // If HTLC minimum is larger than the amount we're going to transfer, we shouldn't
@@ -1810,6 +2129,13 @@ where L::Target: Logger {
                                        if !contributes_sufficient_value {
                                                if should_log_candidate {
                                                        log_trace!(logger, "Ignoring {} due to insufficient value contribution.", LoggedCandidateHop(&$candidate));
+
+                                                       if let Some(details) = first_hop_details {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate next_outbound_htlc_limit_msat: {}",
+                                                                       details.next_outbound_htlc_limit_msat,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_value_contribution += 1;
                                        } else if exceeds_max_path_length {
@@ -1820,6 +2146,14 @@ where L::Target: Logger {
                                        } else if exceeds_cltv_delta_limit {
                                                if should_log_candidate {
                                                        log_trace!(logger, "Ignoring {} due to exceeding CLTV delta limit.", LoggedCandidateHop(&$candidate));
+
+                                                       if let Some(_) = first_hop_details {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate cltv_expiry_delta: {}. Limit: {}",
+                                                                       hop_total_cltv_delta,
+                                                                       max_total_cltv_expiry_delta,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_cltv_delta_limit += 1;
                                        } else if payment_failed_on_this_channel {
@@ -1832,6 +2166,13 @@ where L::Target: Logger {
                                                        log_trace!(logger,
                                                                "Ignoring {} to avoid overpaying to meet htlc_minimum_msat limit.",
                                                                LoggedCandidateHop(&$candidate));
+
+                                                       if let Some(details) = first_hop_details {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate next_outbound_htlc_minimum_msat: {}",
+                                                                       details.next_outbound_htlc_minimum_msat,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_avoid_overpayment += 1;
                                                hit_minimum_limit = true;
@@ -1845,15 +2186,14 @@ where L::Target: Logger {
                                                );
                                                let path_htlc_minimum_msat = compute_fees_saturating(curr_min, $candidate.fees())
                                                        .saturating_add(curr_min);
-                                               let hm_entry = dist.entry($src_node_id);
+                                               let hm_entry = dist.entry(src_node_id);
                                                let old_entry = hm_entry.or_insert_with(|| {
                                                        // If there was previously no known way to access the source node
                                                        // (recall it goes payee-to-payer) of short_channel_id, first add a
                                                        // semi-dummy record just to compute the fees to reach the source node.
                                                        // This will affect our decision on selecting short_channel_id
-                                                       // as a way to reach the $dest_node_id.
+                                                       // as a way to reach the $candidate.target() node.
                                                        PathBuildingHop {
-                                                               node_id: $dest_node_id.clone(),
                                                                candidate: $candidate.clone(),
                                                                fee_msat: 0,
                                                                next_hops_fee_msat: u64::max_value(),
@@ -1882,7 +2222,7 @@ where L::Target: Logger {
 
                                                        // Ignore hop_use_fee_msat for channel-from-us as we assume all channels-from-us
                                                        // will have the same effective-fee
-                                                       if $src_node_id != our_node_id {
+                                                       if src_node_id != our_node_id {
                                                                // Note that `u64::max_value` means we'll always fail the
                                                                // `old_entry.total_fee_msat > total_fee_msat` check below
                                                                hop_use_fee_msat = compute_fees_saturating(amount_to_transfer_over_msat, $candidate.fees());
@@ -1893,6 +2233,14 @@ where L::Target: Logger {
                                                        if total_fee_msat > max_total_routing_fee_msat {
                                                                if should_log_candidate {
                                                                        log_trace!(logger, "Ignoring {} due to exceeding max total routing fee limit.", LoggedCandidateHop(&$candidate));
+
+                                                                       if let Some(_) = first_hop_details {
+                                                                               log_trace!(logger,
+                                                                                       "First hop candidate routing fee: {}. Limit: {}",
+                                                                                       total_fee_msat,
+                                                                                       max_total_routing_fee_msat,
+                                                                               );
+                                                                       }
                                                                }
                                                                num_ignored_total_fee_limit += 1;
                                                        } else {
@@ -1901,22 +2249,15 @@ where L::Target: Logger {
                                                                        inflight_htlc_msat: used_liquidity_msat,
                                                                        effective_capacity,
                                                                };
-                                                               let channel_penalty_msat = scid_opt.map_or(0,
-                                                                       |scid| scorer.channel_penalty_msat(scid, &$src_node_id, &$dest_node_id,
-                                                                               channel_usage, score_params));
+                                                               let channel_penalty_msat =
+                                                                       scorer.channel_penalty_msat($candidate,
+                                                                               channel_usage,
+                                                                               score_params);
                                                                let path_penalty_msat = $next_hops_path_penalty_msat
                                                                        .saturating_add(channel_penalty_msat);
-                                                               let new_graph_node = RouteGraphNode {
-                                                                       node_id: $src_node_id,
-                                                                       lowest_fee_to_node: total_fee_msat,
-                                                                       total_cltv_delta: hop_total_cltv_delta,
-                                                                       value_contribution_msat,
-                                                                       path_htlc_minimum_msat,
-                                                                       path_penalty_msat,
-                                                                       path_length_to_node,
-                                                               };
 
-                                                               // Update the way of reaching $src_node_id with the given short_channel_id (from $dest_node_id),
+                                                               // Update the way of reaching $candidate.source()
+                                                               // with the given short_channel_id (from $candidate.target()),
                                                                // if this way is cheaper than the already known
                                                                // (considering the cost to "reach" this channel from the route destination,
                                                                // the cost of using this channel,
@@ -1938,11 +2279,17 @@ where L::Target: Logger {
                                                                        .saturating_add(path_penalty_msat);
 
                                                                if !old_entry.was_processed && new_cost < old_cost {
+                                                                       let new_graph_node = RouteGraphNode {
+                                                                               node_id: src_node_id,
+                                                                               score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat),
+                                                                               total_cltv_delta: hop_total_cltv_delta,
+                                                                               value_contribution_msat,
+                                                                               path_length_to_node,
+                                                                       };
                                                                        targets.push(new_graph_node);
                                                                        old_entry.next_hops_fee_msat = $next_hops_fee_msat;
                                                                        old_entry.hop_use_fee_msat = hop_use_fee_msat;
                                                                        old_entry.total_fee_msat = total_fee_msat;
-                                                                       old_entry.node_id = $dest_node_id.clone();
                                                                        old_entry.candidate = $candidate.clone();
                                                                        old_entry.fee_msat = 0; // This value will be later filled with hop_use_fee_msat of the following channel
                                                                        old_entry.path_htlc_minimum_msat = path_htlc_minimum_msat;
@@ -1988,6 +2335,13 @@ where L::Target: Logger {
                                                        log_trace!(logger,
                                                                "Ignoring {} due to its htlc_minimum_msat limit.",
                                                                LoggedCandidateHop(&$candidate));
+
+                                                       if let Some(details) = first_hop_details {
+                                                               log_trace!(logger,
+                                                                       "First hop candidate next_outbound_htlc_minimum_msat: {}",
+                                                                       details.next_outbound_htlc_minimum_msat,
+                                                               );
+                                                       }
                                                }
                                                num_ignored_htlc_minimum_msat_limit += 1;
                                        }
@@ -2005,28 +2359,38 @@ where L::Target: Logger {
        // meaning how much will be paid in fees after this node (to the best of our knowledge).
        // This data can later be helpful to optimize routing (pay lower fees).
        macro_rules! add_entries_to_cheapest_to_target_node {
-               ( $node: expr, $node_id: expr, $fee_to_target_msat: expr, $next_hops_value_contribution: expr,
-                 $next_hops_path_htlc_minimum_msat: expr, $next_hops_path_penalty_msat: expr,
+               ( $node: expr, $node_id: expr, $next_hops_value_contribution: expr,
                  $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => {
+                       let fee_to_target_msat;
+                       let next_hops_path_htlc_minimum_msat;
+                       let next_hops_path_penalty_msat;
                        let skip_node = if let Some(elem) = dist.get_mut(&$node_id) {
                                let was_processed = elem.was_processed;
                                elem.was_processed = true;
+                               fee_to_target_msat = elem.total_fee_msat;
+                               next_hops_path_htlc_minimum_msat = elem.path_htlc_minimum_msat;
+                               next_hops_path_penalty_msat = elem.path_penalty_msat;
                                was_processed
                        } else {
                                // Entries are added to dist in add_entry!() when there is a channel from a node.
                                // Because there are no channels from payee, it will not have a dist entry at this point.
                                // If we're processing any other node, it will always be the result of a channel from it.
                                debug_assert_eq!($node_id, maybe_dummy_payee_node_id);
+                               fee_to_target_msat = 0;
+                               next_hops_path_htlc_minimum_msat = 0;
+                               next_hops_path_penalty_msat = 0;
                                false
                        };
 
                        if !skip_node {
                                if let Some(first_channels) = first_hop_targets.get(&$node_id) {
                                        for details in first_channels {
-                                               let candidate = CandidateRouteHop::FirstHop { details };
-                                               add_entry!(candidate, our_node_id, $node_id, $fee_to_target_msat,
+                                               let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
+                                                       details, payer_node_id: &our_node_id,
+                                               });
+                                               add_entry!(&candidate, fee_to_target_msat,
                                                        $next_hops_value_contribution,
-                                                       $next_hops_path_htlc_minimum_msat, $next_hops_path_penalty_msat,
+                                                       next_hops_path_htlc_minimum_msat, next_hops_path_penalty_msat,
                                                        $next_hops_cltv_delta, $next_hops_path_length);
                                        }
                                }
@@ -2044,15 +2408,15 @@ where L::Target: Logger {
                                                        if let Some((directed_channel, source)) = chan.as_directed_to(&$node_id) {
                                                                if first_hops.is_none() || *source != our_node_id {
                                                                        if directed_channel.direction().enabled {
-                                                                               let candidate = CandidateRouteHop::PublicHop {
+                                                                               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
                                                                                        info: directed_channel,
                                                                                        short_channel_id: *chan_id,
-                                                                               };
-                                                                               add_entry!(candidate, *source, $node_id,
-                                                                                       $fee_to_target_msat,
+                                                                               });
+                                                                               add_entry!(&candidate,
+                                                                                       fee_to_target_msat,
                                                                                        $next_hops_value_contribution,
-                                                                                       $next_hops_path_htlc_minimum_msat,
-                                                                                       $next_hops_path_penalty_msat,
+                                                                                       next_hops_path_htlc_minimum_msat,
+                                                                                       next_hops_path_penalty_msat,
                                                                                        $next_hops_cltv_delta, $next_hops_path_length);
                                                                        }
                                                                }
@@ -2078,8 +2442,10 @@ where L::Target: Logger {
                // place where it could be added.
                payee_node_id_opt.map(|payee| first_hop_targets.get(&payee).map(|first_channels| {
                        for details in first_channels {
-                               let candidate = CandidateRouteHop::FirstHop { details };
-                               let added = add_entry!(candidate, our_node_id, payee, 0, path_value_msat,
+                               let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
+                                       details, payer_node_id: &our_node_id,
+                               });
+                               let added = add_entry!(&candidate, 0, path_value_msat,
                                                                        0, 0u64, 0, 0).is_some();
                                log_trace!(logger, "{} direct route to payee via {}",
                                                if added { "Added" } else { "Skipped" }, LoggedCandidateHop(&candidate));
@@ -2095,7 +2461,7 @@ where L::Target: Logger {
                        // If not, targets.pop() will not even let us enter the loop in step 2.
                        None => {},
                        Some(node) => {
-                               add_entries_to_cheapest_to_target_node!(node, payee, 0, path_value_msat, 0, 0u64, 0, 0);
+                               add_entries_to_cheapest_to_target_node!(node, payee, path_value_msat, 0, 0);
                        },
                });
 
@@ -2113,10 +2479,10 @@ where L::Target: Logger {
                                network_nodes.get(&intro_node_id).is_some();
                        if !have_intro_node_in_graph || our_node_id == intro_node_id { continue }
                        let candidate = if hint.1.blinded_hops.len() == 1 {
-                               CandidateRouteHop::OneHopBlinded { hint, hint_idx }
-                       } else { CandidateRouteHop::Blinded { hint, hint_idx } };
+                               CandidateRouteHop::OneHopBlinded(OneHopBlindedPathCandidate { hint, hint_idx })
+                       } else { CandidateRouteHop::Blinded(BlindedPathCandidate { hint, hint_idx }) };
                        let mut path_contribution_msat = path_value_msat;
-                       if let Some(hop_used_msat) = add_entry!(candidate, intro_node_id, maybe_dummy_payee_node_id,
+                       if let Some(hop_used_msat) = add_entry!(&candidate,
                                0, path_contribution_msat, 0, 0_u64, 0, 0)
                        {
                                path_contribution_msat = hop_used_msat;
@@ -2125,14 +2491,16 @@ where L::Target: Logger {
                                sort_first_hop_channels(first_channels, &used_liquidities, recommended_value_msat,
                                        our_node_pubkey);
                                for details in first_channels {
-                                       let first_hop_candidate = CandidateRouteHop::FirstHop { details };
+                                       let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
+                                               details, payer_node_id: &our_node_id,
+                                       });
                                        let blinded_path_fee = match compute_fees(path_contribution_msat, candidate.fees()) {
                                                Some(fee) => fee,
                                                None => continue
                                        };
                                        let path_min = candidate.htlc_minimum_msat().saturating_add(
                                                compute_fees_saturating(candidate.htlc_minimum_msat(), candidate.fees()));
-                                       add_entry!(first_hop_candidate, our_node_id, intro_node_id, blinded_path_fee,
+                                       add_entry!(&first_hop_candidate, blinded_path_fee,
                                                path_contribution_msat, path_min, 0_u64, candidate.cltv_expiry_delta(),
                                                candidate.blinded_path().map_or(1, |bp| bp.blinded_hops.len() as u8));
                                }
@@ -2164,8 +2532,7 @@ where L::Target: Logger {
                                let mut aggregate_path_contribution_msat = path_value_msat;
 
                                for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
-                                       let source = NodeId::from_pubkey(&hop.src_node_id);
-                                       let target = NodeId::from_pubkey(&prev_hop_id);
+                                       let target = private_hop_key_cache.get(&prev_hop_id).unwrap();
 
                                        if let Some(first_channels) = first_hop_targets.get(&target) {
                                                if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) {
@@ -2178,13 +2545,13 @@ where L::Target: Logger {
                                        let candidate = network_channels
                                                .get(&hop.short_channel_id)
                                                .and_then(|channel| channel.as_directed_to(&target))
-                                               .map(|(info, _)| CandidateRouteHop::PublicHop {
+                                               .map(|(info, _)| CandidateRouteHop::PublicHop(PublicHopCandidate {
                                                        info,
                                                        short_channel_id: hop.short_channel_id,
-                                               })
-                                               .unwrap_or_else(|| CandidateRouteHop::PrivateHop { hint: hop });
+                                               }))
+                                               .unwrap_or_else(|| CandidateRouteHop::PrivateHop(PrivateHopCandidate { hint: hop, target_node_id: target }));
 
-                                       if let Some(hop_used_msat) = add_entry!(candidate, source, target,
+                                       if let Some(hop_used_msat) = add_entry!(&candidate,
                                                aggregate_next_hops_fee_msat, aggregate_path_contribution_msat,
                                                aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat,
                                                aggregate_next_hops_cltv_delta, aggregate_next_hops_path_length)
@@ -2198,7 +2565,7 @@ where L::Target: Logger {
                                        }
 
                                        let used_liquidity_msat = used_liquidities
-                                               .get(&candidate.id(source < target)).copied()
+                                               .get(&candidate.id()).copied()
                                                .unwrap_or(0);
                                        let channel_usage = ChannelUsage {
                                                amount_msat: final_value_msat + aggregate_next_hops_fee_msat,
@@ -2206,7 +2573,7 @@ where L::Target: Logger {
                                                effective_capacity: candidate.effective_capacity(),
                                        };
                                        let channel_penalty_msat = scorer.channel_penalty_msat(
-                                               hop.short_channel_id, &source, &target, channel_usage, score_params
+                                               &candidate, channel_usage, score_params
                                        );
                                        aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat
                                                .saturating_add(channel_penalty_msat);
@@ -2222,8 +2589,10 @@ where L::Target: Logger {
                                                sort_first_hop_channels(first_channels, &used_liquidities,
                                                        recommended_value_msat, our_node_pubkey);
                                                for details in first_channels {
-                                                       let first_hop_candidate = CandidateRouteHop::FirstHop { details };
-                                                       add_entry!(first_hop_candidate, our_node_id, target,
+                                                       let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
+                                                               details, payer_node_id: &our_node_id,
+                                                       });
+                                                       add_entry!(&first_hop_candidate,
                                                                aggregate_next_hops_fee_msat, aggregate_path_contribution_msat,
                                                                aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat,
                                                                aggregate_next_hops_cltv_delta, aggregate_next_hops_path_length);
@@ -2267,9 +2636,10 @@ where L::Target: Logger {
                                                        sort_first_hop_channels(first_channels, &used_liquidities,
                                                                recommended_value_msat, our_node_pubkey);
                                                        for details in first_channels {
-                                                               let first_hop_candidate = CandidateRouteHop::FirstHop { details };
-                                                               add_entry!(first_hop_candidate, our_node_id,
-                                                                       NodeId::from_pubkey(&hop.src_node_id),
+                                                               let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
+                                                                       details, payer_node_id: &our_node_id,
+                                                               });
+                                                               add_entry!(&first_hop_candidate,
                                                                        aggregate_next_hops_fee_msat,
                                                                        aggregate_path_contribution_msat,
                                                                        aggregate_next_hops_path_htlc_minimum_msat,
@@ -2298,7 +2668,7 @@ where L::Target: Logger {
                // Both these cases (and other cases except reaching recommended_value_msat) mean that
                // paths_collection will be stopped because found_new_path==false.
                // This is not necessarily a routing failure.
-               'path_construction: while let Some(RouteGraphNode { node_id, lowest_fee_to_node, total_cltv_delta, mut value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat, path_length_to_node, .. }) = targets.pop() {
+               'path_construction: while let Some(RouteGraphNode { node_id, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() {
 
                        // Since we're going payee-to-payer, hitting our node as a target means we should stop
                        // traversing the graph and arrange the path out of what we found.
@@ -2308,10 +2678,13 @@ where L::Target: Logger {
 
                                'path_walk: loop {
                                        let mut features_set = false;
-                                       if let Some(first_channels) = first_hop_targets.get(&ordered_hops.last().unwrap().0.node_id) {
+                                       let target = ordered_hops.last().unwrap().0.candidate.target().unwrap_or(maybe_dummy_payee_node_id);
+                                       if let Some(first_channels) = first_hop_targets.get(&target) {
                                                for details in first_channels {
-                                                       if let Some(scid) = ordered_hops.last().unwrap().0.candidate.short_channel_id() {
-                                                               if details.get_outbound_payment_scid().unwrap() == scid {
+                                                       if let CandidateRouteHop::FirstHop(FirstHopCandidate { details: last_hop_details, .. })
+                                                               = ordered_hops.last().unwrap().0.candidate
+                                                       {
+                                                               if details.get_outbound_payment_scid() == last_hop_details.get_outbound_payment_scid() {
                                                                        ordered_hops.last_mut().unwrap().1 = details.counterparty.features.to_context();
                                                                        features_set = true;
                                                                        break;
@@ -2320,7 +2693,7 @@ where L::Target: Logger {
                                                }
                                        }
                                        if !features_set {
-                                               if let Some(node) = network_nodes.get(&ordered_hops.last().unwrap().0.node_id) {
+                                               if let Some(node) = network_nodes.get(&target) {
                                                        if let Some(node_info) = node.announcement_info.as_ref() {
                                                                ordered_hops.last_mut().unwrap().1 = node_info.features.clone();
                                                        } else {
@@ -2337,11 +2710,11 @@ where L::Target: Logger {
                                        // save this path for the payment route. Also, update the liquidity
                                        // remaining on the used hops, so that we take them into account
                                        // while looking for more paths.
-                                       if ordered_hops.last().unwrap().0.node_id == maybe_dummy_payee_node_id {
+                                       if target == maybe_dummy_payee_node_id {
                                                break 'path_walk;
                                        }
 
-                                       new_entry = match dist.remove(&ordered_hops.last().unwrap().0.node_id) {
+                                       new_entry = match dist.remove(&target) {
                                                Some(payment_hop) => payment_hop,
                                                // We can't arrive at None because, if we ever add an entry to targets,
                                                // we also fill in the entry in dist (see add_entry!).
@@ -2380,12 +2753,10 @@ where L::Target: Logger {
                                // Remember that we used these channels so that we don't rely
                                // on the same liquidity in future paths.
                                let mut prevented_redundant_path_selection = false;
-                               let prev_hop_iter = core::iter::once(&our_node_id)
-                                       .chain(payment_path.hops.iter().map(|(hop, _)| &hop.node_id));
-                               for (prev_hop, (hop, _)) in prev_hop_iter.zip(payment_path.hops.iter()) {
+                               for (hop, _) in payment_path.hops.iter() {
                                        let spent_on_hop_msat = value_contribution_msat + hop.next_hops_fee_msat;
                                        let used_liquidity_msat = used_liquidities
-                                               .entry(hop.candidate.id(*prev_hop < hop.node_id))
+                                               .entry(hop.candidate.id())
                                                .and_modify(|used_liquidity_msat| *used_liquidity_msat += spent_on_hop_msat)
                                                .or_insert(spent_on_hop_msat);
                                        let hop_capacity = hop.candidate.effective_capacity();
@@ -2406,8 +2777,10 @@ where L::Target: Logger {
                                        log_trace!(logger,
                                                "Disabling route candidate {} for future path building iterations to avoid duplicates.",
                                                LoggedCandidateHop(victim_candidate));
-                                       *used_liquidities.entry(victim_candidate.id(false)).or_default() = exhausted;
-                                       *used_liquidities.entry(victim_candidate.id(true)).or_default() = exhausted;
+                                       if let Some(scid) = victim_candidate.short_channel_id() {
+                                               *used_liquidities.entry(CandidateHopId::Clear((scid, false))).or_default() = exhausted;
+                                               *used_liquidities.entry(CandidateHopId::Clear((scid, true))).or_default() = exhausted;
+                                       }
                                }
 
                                // Track the total amount all our collected paths allow to send so that we know
@@ -2430,8 +2803,8 @@ where L::Target: Logger {
                        match network_nodes.get(&node_id) {
                                None => {},
                                Some(node) => {
-                                       add_entries_to_cheapest_to_target_node!(node, node_id, lowest_fee_to_node,
-                                               value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat,
+                                       add_entries_to_cheapest_to_target_node!(node, node_id,
+                                               value_contribution_msat,
                                                total_cltv_delta, path_length_to_node);
                                },
                        }
@@ -2553,15 +2926,15 @@ where L::Target: Logger {
        selected_route.sort_unstable_by_key(|path| {
                let mut key = [CandidateHopId::Clear((42, true)) ; MAX_PATH_LENGTH_ESTIMATE as usize];
                debug_assert!(path.hops.len() <= key.len());
-               for (scid, key) in path.hops.iter() .map(|h| h.0.candidate.id(true)).zip(key.iter_mut()) {
+               for (scid, key) in path.hops.iter() .map(|h| h.0.candidate.id()).zip(key.iter_mut()) {
                        *key = scid;
                }
                key
        });
        for idx in 0..(selected_route.len() - 1) {
                if idx + 1 >= selected_route.len() { break; }
-               if iter_equal(selected_route[idx    ].hops.iter().map(|h| (h.0.candidate.id(true), h.0.node_id)),
-                             selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(true), h.0.node_id))) {
+               if iter_equal(selected_route[idx    ].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target())),
+                             selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target()))) {
                        let new_value = selected_route[idx].get_value_msat() + selected_route[idx + 1].get_value_msat();
                        selected_route[idx].update_value_and_recompute_fees(new_value);
                        selected_route.remove(idx + 1);
@@ -2571,29 +2944,29 @@ where L::Target: Logger {
        let mut paths = Vec::new();
        for payment_path in selected_route {
                let mut hops = Vec::with_capacity(payment_path.hops.len());
-               let mut prev_hop_node_id = our_node_id;
                for (hop, node_features) in payment_path.hops.iter()
                        .filter(|(h, _)| h.candidate.short_channel_id().is_some())
                {
-                       let maybe_announced_channel = if let CandidateRouteHop::PublicHop { .. } = hop.candidate {
+                       let target = hop.candidate.target().expect("target is defined when short_channel_id is defined");
+                       let maybe_announced_channel = if let CandidateRouteHop::PublicHop(_) = hop.candidate {
                                // If we sourced the hop from the graph we're sure the target node is announced.
                                true
-                       } else if let CandidateRouteHop::FirstHop { details } = hop.candidate {
+                       } else if let CandidateRouteHop::FirstHop(first_hop) = &hop.candidate {
                                // If this is a first hop we also know if it's announced.
-                               details.is_public
+                               first_hop.details.is_public
                        } else {
                                // If we sourced it any other way, we double-check the network graph to see if
                                // there are announced channels between the endpoints. If so, the hop might be
                                // referring to any of the announced channels, as its `short_channel_id` might be
                                // an alias, in which case we don't take any chances here.
-                               network_graph.node(&hop.node_id).map_or(false, |hop_node|
+                               network_graph.node(&target).map_or(false, |hop_node|
                                        hop_node.channels.iter().any(|scid| network_graph.channel(*scid)
-                                                       .map_or(false, |c| c.as_directed_from(&prev_hop_node_id).is_some()))
+                                                       .map_or(false, |c| c.as_directed_from(&hop.candidate.source()).is_some()))
                                )
                        };
 
                        hops.push(RouteHop {
-                               pubkey: PublicKey::from_slice(hop.node_id.as_slice()).map_err(|_| LightningError{err: format!("Public key {:?} is invalid", &hop.node_id), action: ErrorAction::IgnoreAndLog(Level::Trace)})?,
+                               pubkey: PublicKey::from_slice(target.as_slice()).map_err(|_| LightningError{err: format!("Public key {:?} is invalid", &target), action: ErrorAction::IgnoreAndLog(Level::Trace)})?,
                                node_features: node_features.clone(),
                                short_channel_id: hop.candidate.short_channel_id().unwrap(),
                                channel_features: hop.candidate.features(),
@@ -2601,8 +2974,6 @@ where L::Target: Logger {
                                cltv_expiry_delta: hop.candidate.cltv_expiry_delta(),
                                maybe_announced_channel,
                        });
-
-                       prev_hop_node_id = hop.node_id;
                }
                let mut final_cltv_delta = final_cltv_expiry_delta;
                let blinded_tail = payment_path.hops.last().and_then(|(h, _)| {
@@ -2765,13 +3136,13 @@ fn build_route_from_hops_internal<L: Deref>(
 
        impl ScoreLookUp for HopScorer {
                type ScoreParams = ();
-               fn channel_penalty_msat(&self, _short_channel_id: u64, source: &NodeId, target: &NodeId,
+               fn channel_penalty_msat(&self, candidate: &CandidateRouteHop,
                        _usage: ChannelUsage, _score_params: &Self::ScoreParams) -> u64
                {
                        let mut cur_id = self.our_node_id;
                        for i in 0..self.hop_ids.len() {
                                if let Some(next_id) = self.hop_ids[i] {
-                                       if cur_id == *source && next_id == *target {
+                                       if cur_id == candidate.source() && Some(next_id) == candidate.target() {
                                                return 0;
                                        }
                                        cur_id = next_id;
@@ -2812,7 +3183,7 @@ mod tests {
        use crate::routing::utxo::UtxoResult;
        use crate::routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
                BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
-               DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE, RouteParameters};
+               DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE, RouteParameters, CandidateRouteHop, PublicHopCandidate};
        use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, ScoreLookUp, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
        use crate::routing::test_utils::{add_channel, add_or_update_node, build_graph, build_line_graph, id_to_feature_flags, get_nodes, update_channel};
        use crate::chain::transaction::OutPoint;
@@ -6117,8 +6488,8 @@ mod tests {
        }
        impl ScoreLookUp for BadChannelScorer {
                type ScoreParams = ();
-               fn channel_penalty_msat(&self, short_channel_id: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
-                       if short_channel_id == self.short_channel_id { u64::max_value() } else { 0 }
+               fn channel_penalty_msat(&self, candidate: &CandidateRouteHop, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
+                       if candidate.short_channel_id() == Some(self.short_channel_id) { u64::max_value()  } else { 0  }
                }
        }
 
@@ -6133,8 +6504,8 @@ mod tests {
 
        impl ScoreLookUp for BadNodeScorer {
                type ScoreParams = ();
-               fn channel_penalty_msat(&self, _: u64, _: &NodeId, target: &NodeId, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
-                       if *target == self.node_id { u64::max_value() } else { 0 }
+               fn channel_penalty_msat(&self, candidate: &CandidateRouteHop, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
+                       if candidate.target() == Some(self.node_id) { u64::max_value() } else { 0 }
                }
        }
 
@@ -6622,26 +6993,32 @@ mod tests {
                };
                scorer_params.set_manual_penalty(&NodeId::from_pubkey(&nodes[3]), 123);
                scorer_params.set_manual_penalty(&NodeId::from_pubkey(&nodes[4]), 456);
-               assert_eq!(scorer.channel_penalty_msat(42, &NodeId::from_pubkey(&nodes[3]), &NodeId::from_pubkey(&nodes[4]), usage, &scorer_params), 456);
+               let network_graph = network_graph.read_only();
+               let channels = network_graph.channels();
+               let channel = channels.get(&5).unwrap();
+               let info = channel.as_directed_from(&NodeId::from_pubkey(&nodes[3])).unwrap();
+               let candidate: CandidateRouteHop = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info: info.0,
+                       short_channel_id: 5,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &scorer_params), 456);
 
                // Then check we can get a normal route
                let payment_params = PaymentParameters::from_node_id(nodes[10], 42);
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, 100);
-               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+               let route = get_route(&our_id, &route_params, &network_graph, None,
                        Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
                assert!(route.is_ok());
 
                // Then check that we can't get a route if we ban an intermediate node.
                scorer_params.add_banned(&NodeId::from_pubkey(&nodes[3]));
-               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
-                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
                assert!(route.is_err());
 
                // Finally make sure we can route again, when we remove the ban.
                scorer_params.remove_banned(&NodeId::from_pubkey(&nodes[3]));
-               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
-                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
                assert!(route.is_ok());
        }
 
@@ -7917,6 +8294,7 @@ mod tests {
 pub(crate) mod bench_utils {
        use super::*;
        use std::fs::File;
+       use std::time::Duration;
 
        use bitcoin::hashes::Hash;
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
@@ -8065,10 +8443,10 @@ pub(crate) mod bench_utils {
                                                if let Ok(route) = route_res {
                                                        for path in route.paths {
                                                                if seed & 0x80 == 0 {
-                                                                       scorer.payment_path_successful(&path);
+                                                                       scorer.payment_path_successful(&path, Duration::ZERO);
                                                                } else {
                                                                        let short_channel_id = path.hops[path.hops.len() / 2].short_channel_id;
-                                                                       scorer.payment_path_failed(&path, short_channel_id);
+                                                                       scorer.payment_path_failed(&path, short_channel_id, Duration::ZERO);
                                                                }
                                                                seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
                                                        }
@@ -8116,7 +8494,7 @@ pub mod benches {
 
        struct DummyLogger {}
        impl Logger for DummyLogger {
-               fn log(&self, _record: &Record) {}
+               fn log(&self, _record: Record) {}
        }
 
        pub fn generate_routes_with_zero_penalty_scorer(bench: &mut Criterion) {
index 9c907c3f7fe4bd38d7526b9d10871769ea537d27..646405c6287ac150d88199890137020e0350079f 100644 (file)
@@ -26,7 +26,7 @@
 //! #
 //! # struct FakeLogger {};
 //! # impl Logger for FakeLogger {
-//! #     fn log(&self, record: &Record) { unimplemented!() }
+//! #     fn log(&self, record: Record) { unimplemented!() }
 //! # }
 //! # fn find_scored_route(payer: PublicKey, route_params: RouteParameters, network_graph: NetworkGraph<&FakeLogger>) {
 //! # let logger = FakeLogger {};
 
 use crate::ln::msgs::DecodeError;
 use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
-use crate::routing::router::Path;
+use crate::routing::router::{Path, CandidateRouteHop, PublicHopCandidate};
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use crate::util::logger::Logger;
-use crate::util::time::Time;
 
 use crate::prelude::*;
 use core::{cmp, fmt};
-use core::cell::{RefCell, RefMut, Ref};
 use core::convert::TryInto;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
 use crate::io::{self, Read};
-use crate::sync::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use crate::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+#[cfg(not(c_bindings))]
+use {
+       core::cell::{RefCell, RefMut, Ref},
+       crate::sync::{Mutex, MutexGuard},
+};
 
 /// We define Score ever-so-slightly differently based on whether we are being built for C bindings
 /// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
@@ -103,23 +106,29 @@ pub trait ScoreLookUp {
        /// [`u64::max_value`] is given to indicate sufficient capacity for the invoice's full amount.
        /// Thus, implementations should be overflow-safe.
        fn channel_penalty_msat(
-               &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
+               &self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64;
 }
 
 /// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
 pub trait ScoreUpdate {
        /// Handles updating channel penalties after failing to route through a channel.
-       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);
+       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);
 
        /// Handles updating channel penalties after successfully routing along a path.
-       fn payment_path_successful(&mut self, path: &Path);
+       fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration);
 
        /// Handles updating channel penalties after a probe over the given path failed.
-       fn probe_failed(&mut self, path: &Path, short_channel_id: u64);
+       fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);
 
        /// Handles updating channel penalties after a probe over the given path succeeded.
-       fn probe_successful(&mut self, path: &Path);
+       fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration);
+
+       /// Scorers may wish to reduce their certainty of channel liquidity information over time.
+       /// Thus, this method is provided to allow scorers to observe the passage of time - the holder
+       /// of this object should call this method regularly (generally via the
+       /// `lightning-background-processor` crate).
+       fn time_passed(&mut self, duration_since_epoch: Duration);
 }
 
 /// A trait which can both lookup and update routing channel penalty scores.
@@ -137,28 +146,32 @@ impl<T: ScoreLookUp + ScoreUpdate $(+ $supertrait)*> Score for T {}
 impl<S: ScoreLookUp, T: Deref<Target=S>> ScoreLookUp for T {
        type ScoreParams = S::ScoreParams;
        fn channel_penalty_msat(
-               &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
+               &self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64 {
-               self.deref().channel_penalty_msat(short_channel_id, source, target, usage, score_params)
+               self.deref().channel_penalty_msat(candidate, usage, score_params)
        }
 }
 
 #[cfg(not(c_bindings))]
 impl<S: ScoreUpdate, T: DerefMut<Target=S>> ScoreUpdate for T {
-       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.deref_mut().payment_path_failed(path, short_channel_id)
+       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+               self.deref_mut().payment_path_failed(path, short_channel_id, duration_since_epoch)
+       }
+
+       fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+               self.deref_mut().payment_path_successful(path, duration_since_epoch)
        }
 
-       fn payment_path_successful(&mut self, path: &Path) {
-               self.deref_mut().payment_path_successful(path)
+       fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+               self.deref_mut().probe_failed(path, short_channel_id, duration_since_epoch)
        }
 
-       fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.deref_mut().probe_failed(path, short_channel_id)
+       fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+               self.deref_mut().probe_successful(path, duration_since_epoch)
        }
 
-       fn probe_successful(&mut self, path: &Path) {
-               self.deref_mut().probe_successful(path)
+       fn time_passed(&mut self, duration_since_epoch: Duration) {
+               self.deref_mut().time_passed(duration_since_epoch)
        }
 }
 } }
@@ -315,10 +328,9 @@ impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> {
 #[cfg(c_bindings)]
 impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> {
        type ScoreParams = T::ScoreParams;
-       fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId,
-               target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
+       fn channel_penalty_msat(&self, candidate:&CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64 {
-               self.0.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
+               self.0.channel_penalty_msat(candidate, usage, score_params)
        }
 }
 
@@ -347,20 +359,24 @@ impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
 
 #[cfg(c_bindings)]
 impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
-       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.0.payment_path_failed(path, short_channel_id)
+       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+               self.0.payment_path_failed(path, short_channel_id, duration_since_epoch)
        }
 
-       fn payment_path_successful(&mut self, path: &Path) {
-               self.0.payment_path_successful(path)
+       fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+               self.0.payment_path_successful(path, duration_since_epoch)
        }
 
-       fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.0.probe_failed(path, short_channel_id)
+       fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+               self.0.probe_failed(path, short_channel_id, duration_since_epoch)
        }
 
-       fn probe_successful(&mut self, path: &Path) {
-               self.0.probe_successful(path)
+       fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+               self.0.probe_successful(path, duration_since_epoch)
+       }
+
+       fn time_passed(&mut self, duration_since_epoch: Duration) {
+               self.0.time_passed(duration_since_epoch)
        }
 }
 
@@ -394,19 +410,21 @@ impl FixedPenaltyScorer {
 
 impl ScoreLookUp for FixedPenaltyScorer {
        type ScoreParams = ();
-       fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
+       fn channel_penalty_msat(&self, _: &CandidateRouteHop, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
                self.penalty_msat
        }
 }
 
 impl ScoreUpdate for FixedPenaltyScorer {
-       fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
+       fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
+
+       fn payment_path_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
 
-       fn payment_path_successful(&mut self, _path: &Path) {}
+       fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
 
-       fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
+       fn probe_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
 
-       fn probe_successful(&mut self, _path: &Path) {}
+       fn time_passed(&mut self, _duration_since_epoch: Duration) {}
 }
 
 impl Writeable for FixedPenaltyScorer {
@@ -425,13 +443,6 @@ impl ReadableArgs<u64> for FixedPenaltyScorer {
        }
 }
 
-#[cfg(not(feature = "no-std"))]
-type ConfiguredTime = crate::util::time::MonotonicTime;
-#[cfg(feature = "no-std")]
-use crate::util::time::Eternity;
-#[cfg(feature = "no-std")]
-type ConfiguredTime = Eternity;
-
 /// [`ScoreLookUp`] implementation using channel success probability distributions.
 ///
 /// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
@@ -457,29 +468,18 @@ type ConfiguredTime = Eternity;
 /// formula, but using the history of a channel rather than our latest estimates for the liquidity
 /// bounds.
 ///
-/// # Note
-///
-/// Mixing the `no-std` feature between serialization and deserialization results in undefined
-/// behavior.
-///
 /// [1]: https://arxiv.org/abs/2107.05322
 /// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
 /// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
 /// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
 /// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
 /// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
-pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
-
-/// Probabilistic [`ScoreLookUp`] implementation.
-///
-/// This is not exported to bindings users generally all users should use the [`ProbabilisticScorer`] type alias.
-pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
+pub struct ProbabilisticScorer<G: Deref<Target = NetworkGraph<L>>, L: Deref>
 where L::Target: Logger {
        decay_params: ProbabilisticScoringDecayParameters,
        network_graph: G,
        logger: L,
-       // TODO: Remove entries of closed channels.
-       channel_liquidities: HashMap<u64, ChannelLiquidity<T>>,
+       channel_liquidities: HashMap<u64, ChannelLiquidity>,
 }
 
 /// Parameters for configuring [`ProbabilisticScorer`].
@@ -734,7 +734,7 @@ pub struct ProbabilisticScoringDecayParameters {
        ///
        /// Default value: 14 days
        ///
-       /// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorerUsingTime::historical_estimated_channel_liquidity_probabilities
+       /// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorer::historical_estimated_channel_liquidity_probabilities
        pub historical_no_updates_half_life: Duration,
 
        /// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
@@ -783,33 +783,35 @@ impl ProbabilisticScoringDecayParameters {
 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
 /// first node in the ordering of the channel's counterparties. Thus, swapping the two liquidity
 /// offset fields gives the opposite direction.
-struct ChannelLiquidity<T: Time> {
+struct ChannelLiquidity {
        /// Lower channel liquidity bound in terms of an offset from zero.
        min_liquidity_offset_msat: u64,
 
        /// Upper channel liquidity bound in terms of an offset from the effective capacity.
        max_liquidity_offset_msat: u64,
 
-       /// Time when the liquidity bounds were last modified.
-       last_updated: T,
-
        min_liquidity_offset_history: HistoricalBucketRangeTracker,
        max_liquidity_offset_history: HistoricalBucketRangeTracker,
+
+       /// Time when either liquidity bound was last modified as an offset since the unix epoch.
+       last_updated: Duration,
+
+       /// Time when the historical liquidity bounds were last modified as an offset against the unix
+       /// epoch.
+       offset_history_last_updated: Duration,
 }
 
-/// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity and
-/// decayed with a given half life.
-struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
+/// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity.
+struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Deref<Target = Duration>> {
        min_liquidity_offset_msat: L,
        max_liquidity_offset_msat: L,
        liquidity_history: HistoricalMinMaxBuckets<BRT>,
        capacity_msat: u64,
-       last_updated: U,
-       now: T,
-       decay_params: ProbabilisticScoringDecayParameters,
+       last_updated: T,
+       offset_history_last_updated: T,
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ProbabilisticScorer<G, L> where L::Target: Logger {
        /// Creates a new scorer using the given scoring parameters for sending payments from a node
        /// through a network graph.
        pub fn new(decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L) -> Self {
@@ -822,7 +824,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
        }
 
        #[cfg(test)]
-       fn with_channel(mut self, short_channel_id: u64, liquidity: ChannelLiquidity<T>) -> Self {
+       fn with_channel(mut self, short_channel_id: u64, liquidity: ChannelLiquidity) -> Self {
                assert!(self.channel_liquidities.insert(short_channel_id, liquidity).is_none());
                self
        }
@@ -832,20 +834,16 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
        /// Note that this writes roughly one line per channel for which we have a liquidity estimate,
        /// which may be a substantial amount of log output.
        pub fn debug_log_liquidity_stats(&self) {
-               let now = T::now();
-
                let graph = self.network_graph.read_only();
                for (scid, liq) in self.channel_liquidities.iter() {
                        if let Some(chan_debug) = graph.channels().get(scid) {
                                let log_direction = |source, target| {
                                        if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
                                                let amt = directed_info.effective_capacity().as_msat();
-                                               let dir_liq = liq.as_directed(source, target, amt, self.decay_params);
+                                               let dir_liq = liq.as_directed(source, target, amt);
 
-                                               let (min_buckets, max_buckets) = dir_liq.liquidity_history
-                                                       .get_decayed_buckets(now, *dir_liq.last_updated,
-                                                               self.decay_params.historical_no_updates_half_life)
-                                                       .unwrap_or(([0; 32], [0; 32]));
+                                               let min_buckets = &dir_liq.liquidity_history.min_liquidity_offset_history.buckets;
+                                               let max_buckets = &dir_liq.liquidity_history.max_liquidity_offset_history.buckets;
 
                                                log_debug!(self.logger, core::concat!(
                                                        "Liquidity from {} to {} via {} is in the range ({}, {}).\n",
@@ -894,7 +892,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                        if let Some(liq) = self.channel_liquidities.get(&scid) {
                                if let Some((directed_info, source)) = chan.as_directed_to(target) {
                                        let amt = directed_info.effective_capacity().as_msat();
-                                       let dir_liq = liq.as_directed(source, target, amt, self.decay_params);
+                                       let dir_liq = liq.as_directed(source, target, amt);
                                        return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()));
                                }
                        }
@@ -924,7 +922,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
        /// in the top and bottom bucket, and roughly with similar (recent) frequency.
        ///
        /// Because the datapoints are decayed slowly over time, values will eventually return to
-       /// `Some(([1; 32], [1; 32]))` and then to `None` once no datapoints remain.
+       /// `Some(([0; 32], [0; 32]))` or `None` if no data remains for a channel.
        ///
        /// In order to fetch a single success probability from the buckets provided here, as used in
        /// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
@@ -936,13 +934,10 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                        if let Some(liq) = self.channel_liquidities.get(&scid) {
                                if let Some((directed_info, source)) = chan.as_directed_to(target) {
                                        let amt = directed_info.effective_capacity().as_msat();
-                                       let dir_liq = liq.as_directed(source, target, amt, self.decay_params);
+                                       let dir_liq = liq.as_directed(source, target, amt);
 
-                                       let (min_buckets, mut max_buckets) =
-                                               dir_liq.liquidity_history.get_decayed_buckets(
-                                                       dir_liq.now, *dir_liq.last_updated,
-                                                       self.decay_params.historical_no_updates_half_life
-                                               )?;
+                                       let min_buckets = dir_liq.liquidity_history.min_liquidity_offset_history.buckets;
+                                       let mut max_buckets = dir_liq.liquidity_history.max_liquidity_offset_history.buckets;
 
                                        // Note that the liquidity buckets are an offset from the edge, so we inverse
                                        // the max order to get the probabilities from zero.
@@ -970,12 +965,10 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                        if let Some(liq) = self.channel_liquidities.get(&scid) {
                                if let Some((directed_info, source)) = chan.as_directed_to(target) {
                                        let capacity_msat = directed_info.effective_capacity().as_msat();
-                                       let dir_liq = liq.as_directed(source, target, capacity_msat, self.decay_params);
+                                       let dir_liq = liq.as_directed(source, target, capacity_msat);
 
                                        return dir_liq.liquidity_history.calculate_success_probability_times_billion(
-                                               dir_liq.now, *dir_liq.last_updated,
-                                               self.decay_params.historical_no_updates_half_life, &params, amount_msat,
-                                               capacity_msat
+                                               &params, amount_msat, capacity_msat
                                        ).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
                                }
                        }
@@ -984,23 +977,23 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
        }
 }
 
-impl<T: Time> ChannelLiquidity<T> {
-       #[inline]
-       fn new() -> Self {
+impl ChannelLiquidity {
+       fn new(last_updated: Duration) -> Self {
                Self {
                        min_liquidity_offset_msat: 0,
                        max_liquidity_offset_msat: 0,
                        min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                        max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
-                       last_updated: T::now(),
+                       last_updated,
+                       offset_history_last_updated: last_updated,
                }
        }
 
        /// Returns a view of the channel liquidity directed from `source` to `target` assuming
        /// `capacity_msat`.
        fn as_directed(
-               &self, source: &NodeId, target: &NodeId, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
-       ) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, T, &T> {
+               &self, source: &NodeId, target: &NodeId, capacity_msat: u64,
+       ) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, &Duration> {
                let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
                        if source < target {
                                (&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat,
@@ -1019,16 +1012,15 @@ impl<T: Time> ChannelLiquidity<T> {
                        },
                        capacity_msat,
                        last_updated: &self.last_updated,
-                       now: T::now(),
-                       decay_params: decay_params,
+                       offset_history_last_updated: &self.offset_history_last_updated,
                }
        }
 
        /// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
        /// `capacity_msat`.
        fn as_directed_mut(
-               &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
-       ) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
+               &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64,
+       ) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, &mut Duration> {
                let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
                        if source < target {
                                (&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat,
@@ -1047,8 +1039,20 @@ impl<T: Time> ChannelLiquidity<T> {
                        },
                        capacity_msat,
                        last_updated: &mut self.last_updated,
-                       now: T::now(),
-                       decay_params: decay_params,
+                       offset_history_last_updated: &mut self.offset_history_last_updated,
+               }
+       }
+
+       fn decayed_offset(
+               &self, offset: u64, duration_since_epoch: Duration,
+               decay_params: ProbabilisticScoringDecayParameters,
+       ) -> u64 {
+               let half_life = decay_params.liquidity_offset_half_life.as_secs_f64();
+               if half_life != 0.0 {
+                       let elapsed_time = duration_since_epoch.saturating_sub(self.last_updated).as_secs_f64();
+                       ((offset as f64) * powf64(0.5, elapsed_time / half_life)) as u64
+               } else {
+                       0
                }
        }
 }
@@ -1134,7 +1138,8 @@ fn success_probability(
        (numerator, denominator)
 }
 
-impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity< L, BRT, T, U> {
+impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Deref<Target = Duration>>
+DirectedChannelLiquidity< L, BRT, T> {
        /// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
        /// this direction.
        fn penalty_msat(&self, amount_msat: u64, score_params: &ProbabilisticScoringFeeParameters) -> u64 {
@@ -1182,9 +1187,8 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
                if score_params.historical_liquidity_penalty_multiplier_msat != 0 ||
                   score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
                        if let Some(cumulative_success_prob_times_billion) = self.liquidity_history
-                               .calculate_success_probability_times_billion(self.now, *self.last_updated,
-                                       self.decay_params.historical_no_updates_half_life, score_params, amount_msat,
-                                       self.capacity_msat)
+                               .calculate_success_probability_times_billion(
+                                       score_params, amount_msat, self.capacity_msat)
                        {
                                let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
                                res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
@@ -1228,124 +1232,105 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
        /// Returns the lower bound of the channel liquidity balance in this direction.
        #[inline(always)]
        fn min_liquidity_msat(&self) -> u64 {
-               self.decayed_offset_msat(*self.min_liquidity_offset_msat)
+               *self.min_liquidity_offset_msat
        }
 
        /// Returns the upper bound of the channel liquidity balance in this direction.
        #[inline(always)]
        fn max_liquidity_msat(&self) -> u64 {
                self.capacity_msat
-                       .saturating_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat))
-       }
-
-       fn decayed_offset_msat(&self, offset_msat: u64) -> u64 {
-               let half_life = self.decay_params.liquidity_offset_half_life.as_secs();
-               if half_life != 0 {
-                       // Decay the offset by the appropriate number of half lives. If half of the next half
-                       // life has passed, approximate an additional three-quarter life to help smooth out the
-                       // decay.
-                       let elapsed_time = self.now.duration_since(*self.last_updated).as_secs();
-                       let half_decays = elapsed_time / (half_life / 2);
-                       let decays = half_decays / 2;
-                       let decayed_offset_msat = offset_msat.checked_shr(decays as u32).unwrap_or(0);
-                       if half_decays % 2 == 0 {
-                               decayed_offset_msat
-                       } else {
-                               // 11_585 / 16_384 ~= core::f64::consts::FRAC_1_SQRT_2
-                               // 16_384 == 2^14
-                               (decayed_offset_msat as u128 * 11_585 / 16_384) as u64
-                       }
-               } else {
-                       0
-               }
+                       .saturating_sub(*self.max_liquidity_offset_msat)
        }
 }
 
-impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, BRT, T, U> {
+impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: DerefMut<Target = Duration>>
+DirectedChannelLiquidity<L, BRT, T> {
        /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
-       fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
+       fn failed_at_channel<Log: Deref>(
+               &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
+       ) where Log::Target: Logger {
                let existing_max_msat = self.max_liquidity_msat();
                if amount_msat < existing_max_msat {
                        log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat);
-                       self.set_max_liquidity_msat(amount_msat);
+                       self.set_max_liquidity_msat(amount_msat, duration_since_epoch);
                } else {
                        log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
                                chan_descr, existing_max_msat, amount_msat);
                }
-               self.update_history_buckets(0);
+               self.update_history_buckets(0, duration_since_epoch);
        }
 
        /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
-       fn failed_downstream<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
+       fn failed_downstream<Log: Deref>(
+               &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
+       ) where Log::Target: Logger {
                let existing_min_msat = self.min_liquidity_msat();
                if amount_msat > existing_min_msat {
                        log_debug!(logger, "Setting min liquidity of {} from {} to {}", existing_min_msat, chan_descr, amount_msat);
-                       self.set_min_liquidity_msat(amount_msat);
+                       self.set_min_liquidity_msat(amount_msat, duration_since_epoch);
                } else {
                        log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
                                chan_descr, existing_min_msat, amount_msat);
                }
-               self.update_history_buckets(0);
+               self.update_history_buckets(0, duration_since_epoch);
        }
 
        /// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
-       fn successful<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
+       fn successful<Log: Deref>(&mut self,
+               amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
+       ) where Log::Target: Logger {
                let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
                log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
-               self.set_max_liquidity_msat(max_liquidity_msat);
-               self.update_history_buckets(amount_msat);
+               self.set_max_liquidity_msat(max_liquidity_msat, duration_since_epoch);
+               self.update_history_buckets(amount_msat, duration_since_epoch);
        }
 
        /// Updates the history buckets for this channel. Because the history buckets track what we now
        /// know about the channel's state *prior to our payment* (i.e. what we assume is "steady
        /// state"), we allow the caller to set an offset applied to our liquidity bounds which
        /// represents the amount of the successful payment we just made.
-       fn update_history_buckets(&mut self, bucket_offset_msat: u64) {
-               let half_lives = self.now.duration_since(*self.last_updated).as_secs()
-                       .checked_div(self.decay_params.historical_no_updates_half_life.as_secs())
-                       .map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
-               self.liquidity_history.min_liquidity_offset_history.time_decay_data(half_lives);
-               self.liquidity_history.max_liquidity_offset_history.time_decay_data(half_lives);
-
-               let min_liquidity_offset_msat = self.decayed_offset_msat(*self.min_liquidity_offset_msat);
+       fn update_history_buckets(&mut self, bucket_offset_msat: u64, duration_since_epoch: Duration) {
                self.liquidity_history.min_liquidity_offset_history.track_datapoint(
-                       min_liquidity_offset_msat + bucket_offset_msat, self.capacity_msat
+                       *self.min_liquidity_offset_msat + bucket_offset_msat, self.capacity_msat
                );
-               let max_liquidity_offset_msat = self.decayed_offset_msat(*self.max_liquidity_offset_msat);
                self.liquidity_history.max_liquidity_offset_history.track_datapoint(
-                       max_liquidity_offset_msat.saturating_sub(bucket_offset_msat), self.capacity_msat
+                       self.max_liquidity_offset_msat.saturating_sub(bucket_offset_msat), self.capacity_msat
                );
+               *self.offset_history_last_updated = duration_since_epoch;
        }
 
        /// Adjusts the lower bound of the channel liquidity balance in this direction.
-       fn set_min_liquidity_msat(&mut self, amount_msat: u64) {
+       fn set_min_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
                *self.min_liquidity_offset_msat = amount_msat;
-               *self.max_liquidity_offset_msat = if amount_msat > self.max_liquidity_msat() {
-                       0
-               } else {
-                       self.decayed_offset_msat(*self.max_liquidity_offset_msat)
-               };
-               *self.last_updated = self.now;
+               if amount_msat > self.max_liquidity_msat() {
+                       *self.max_liquidity_offset_msat = 0;
+               }
+               *self.last_updated = duration_since_epoch;
        }
 
        /// Adjusts the upper bound of the channel liquidity balance in this direction.
-       fn set_max_liquidity_msat(&mut self, amount_msat: u64) {
+       fn set_max_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
                *self.max_liquidity_offset_msat = self.capacity_msat.checked_sub(amount_msat).unwrap_or(0);
-               *self.min_liquidity_offset_msat = if amount_msat < self.min_liquidity_msat() {
-                       0
-               } else {
-                       self.decayed_offset_msat(*self.min_liquidity_offset_msat)
-               };
-               *self.last_updated = self.now;
+               if amount_msat < *self.min_liquidity_offset_msat {
+                       *self.min_liquidity_offset_msat = 0;
+               }
+               *self.last_updated = duration_since_epoch;
        }
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreLookUp for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreLookUp for ProbabilisticScorer<G, L> where L::Target: Logger {
        type ScoreParams = ProbabilisticScoringFeeParameters;
        fn channel_penalty_msat(
-               &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
+               &self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
        ) -> u64 {
-               if let Some(penalty) = score_params.manual_node_penalties.get(target) {
+               let (scid, target) = match candidate {
+                       CandidateRouteHop::PublicHop(PublicHopCandidate { info, short_channel_id }) => {
+                               (short_channel_id, info.target())
+                       },
+                       _ => return 0,
+               };
+               let source = candidate.source();
+               if let Some(penalty) = score_params.manual_node_penalties.get(&target) {
                        return *penalty;
                }
 
@@ -1375,17 +1360,17 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreLookUp for Prob
                let amount_msat = usage.amount_msat.saturating_add(usage.inflight_htlc_msat);
                let capacity_msat = usage.effective_capacity.as_msat();
                self.channel_liquidities
-                       .get(&short_channel_id)
-                       .unwrap_or(&ChannelLiquidity::new())
-                       .as_directed(source, target, capacity_msat, self.decay_params)
+                       .get(&scid)
+                       .unwrap_or(&ChannelLiquidity::new(Duration::ZERO))
+                       .as_directed(&source, &target, capacity_msat)
                        .penalty_msat(amount_msat, score_params)
                        .saturating_add(anti_probing_penalty_msat)
                        .saturating_add(base_penalty_msat)
        }
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
-       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreUpdate for ProbabilisticScorer<G, L> where L::Target: Logger {
+       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
                let amount_msat = path.final_value_msat();
                log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
                let network_graph = self.network_graph.read_only();
@@ -1406,15 +1391,17 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for Prob
                                if at_failed_channel {
                                        self.channel_liquidities
                                                .entry(hop.short_channel_id)
-                                               .or_insert_with(ChannelLiquidity::new)
-                                               .as_directed_mut(source, &target, capacity_msat, self.decay_params)
-                                               .failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+                                               .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
+                                               .as_directed_mut(source, &target, capacity_msat)
+                                               .failed_at_channel(amount_msat, duration_since_epoch,
+                                                       format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
                                } else {
                                        self.channel_liquidities
                                                .entry(hop.short_channel_id)
-                                               .or_insert_with(ChannelLiquidity::new)
-                                               .as_directed_mut(source, &target, capacity_msat, self.decay_params)
-                                               .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+                                               .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
+                                               .as_directed_mut(source, &target, capacity_msat)
+                                               .failed_downstream(amount_msat, duration_since_epoch,
+                                                       format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
                                }
                        } else {
                                log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
@@ -1424,7 +1411,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for Prob
                }
        }
 
-       fn payment_path_successful(&mut self, path: &Path) {
+       fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
                let amount_msat = path.final_value_msat();
                log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.",
                        path.hops.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat);
@@ -1440,9 +1427,10 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for Prob
                                let capacity_msat = channel.effective_capacity().as_msat();
                                self.channel_liquidities
                                        .entry(hop.short_channel_id)
-                                       .or_insert_with(ChannelLiquidity::new)
-                                       .as_directed_mut(source, &target, capacity_msat, self.decay_params)
-                                       .successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+                                       .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
+                                       .as_directed_mut(source, &target, capacity_msat)
+                                       .successful(amount_msat, duration_since_epoch,
+                                               format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
                        } else {
                                log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
                                        hop.short_channel_id);
@@ -1450,19 +1438,59 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for Prob
                }
        }
 
-       fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.payment_path_failed(path, short_channel_id)
+       fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+               self.payment_path_failed(path, short_channel_id, duration_since_epoch)
        }
 
-       fn probe_successful(&mut self, path: &Path) {
-               self.payment_path_failed(path, u64::max_value())
+       fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+               self.payment_path_failed(path, u64::max_value(), duration_since_epoch)
+       }
+
+       fn time_passed(&mut self, duration_since_epoch: Duration) {
+               let decay_params = self.decay_params;
+               self.channel_liquidities.retain(|_scid, liquidity| {
+                       liquidity.min_liquidity_offset_msat =
+                               liquidity.decayed_offset(liquidity.min_liquidity_offset_msat, duration_since_epoch, decay_params);
+                       liquidity.max_liquidity_offset_msat =
+                               liquidity.decayed_offset(liquidity.max_liquidity_offset_msat, duration_since_epoch, decay_params);
+                       liquidity.last_updated = duration_since_epoch;
+
+                       let elapsed_time =
+                               duration_since_epoch.saturating_sub(liquidity.offset_history_last_updated);
+                       if elapsed_time > decay_params.historical_no_updates_half_life {
+                               let half_life = decay_params.historical_no_updates_half_life.as_secs_f64();
+                               if half_life != 0.0 {
+                                       let divisor = powf64(2048.0, elapsed_time.as_secs_f64() / half_life) as u64;
+                                       for bucket in liquidity.min_liquidity_offset_history.buckets.iter_mut() {
+                                               *bucket = ((*bucket as u64) * 1024 / divisor) as u16;
+                                       }
+                                       for bucket in liquidity.max_liquidity_offset_history.buckets.iter_mut() {
+                                               *bucket = ((*bucket as u64) * 1024 / divisor) as u16;
+                                       }
+                                       liquidity.offset_history_last_updated = duration_since_epoch;
+                               }
+                       }
+                       liquidity.min_liquidity_offset_msat != 0 || liquidity.max_liquidity_offset_msat != 0 ||
+                               liquidity.min_liquidity_offset_history.buckets != [0; 32] ||
+                               liquidity.max_liquidity_offset_history.buckets != [0; 32]
+               });
        }
 }
 
 #[cfg(c_bindings)]
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T>
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Score for ProbabilisticScorer<G, L>
 where L::Target: Logger {}
 
+#[cfg(feature = "std")]
+#[inline]
+fn powf64(n: f64, exp: f64) -> f64 {
+       n.powf(exp)
+}
+#[cfg(not(feature = "std"))]
+fn powf64(n: f64, exp: f64) -> f64 {
+       libm::powf(n as f32, exp as f32) as f64
+}
+
 mod approx {
        const BITS: u32 = 64;
        const HIGHEST_BIT: u32 = BITS - 1;
@@ -1884,7 +1912,7 @@ mod bucketed_history {
        /// in each of 32 buckets.
        #[derive(Clone, Copy)]
        pub(super) struct HistoricalBucketRangeTracker {
-               buckets: [u16; 32],
+               pub(super) buckets: [u16; 32],
        }
 
        /// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
@@ -1924,14 +1952,6 @@ mod bucketed_history {
                                self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
                        }
                }
-               /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
-               /// datapoints as we receive newer information.
-               #[inline]
-               pub(super) fn time_decay_data(&mut self, half_lives: u32) {
-                       for e in self.buckets.iter_mut() {
-                               *e = e.checked_shr(half_lives).unwrap_or(0);
-                       }
-               }
        }
 
        impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
@@ -1949,22 +1969,20 @@ mod bucketed_history {
        }
 
        impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
-               pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
-               -> Option<([u16; 32], [u16; 32])> {
-                       let (_, required_decays) = self.get_total_valid_points(now, last_updated, half_life)?;
-
-                       let mut min_buckets = *self.min_liquidity_offset_history;
-                       min_buckets.time_decay_data(required_decays);
-                       let mut max_buckets = *self.max_liquidity_offset_history;
-                       max_buckets.time_decay_data(required_decays);
-                       Some((min_buckets.buckets, max_buckets.buckets))
-               }
                #[inline]
-               pub(super) fn get_total_valid_points<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
-               -> Option<(u64, u32)> {
-                       let required_decays = now.duration_since(last_updated).as_secs()
-                               .checked_div(half_life.as_secs())
-                               .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+               pub(super) fn calculate_success_probability_times_billion(
+                       &self, params: &ProbabilisticScoringFeeParameters, amount_msat: u64,
+                       capacity_msat: u64
+               ) -> Option<u64> {
+                       // If historical penalties are enabled, we try to calculate a probability of success
+                       // given our historical distribution of min- and max-liquidity bounds in a channel.
+                       // To do so, we walk the set of historical liquidity bucket (min, max) combinations
+                       // (where min_idx < max_idx, as having a minimum above our maximum is an invalid
+                       // state). For each pair, we calculate the probability as if the bucket's corresponding
+                       // min- and max- liquidity bounds were our current liquidity bounds and then multiply
+                       // that probability by the weight of the selected buckets.
+                       let payment_pos = amount_to_pos(amount_msat, capacity_msat);
+                       if payment_pos >= POSITION_TICKS { return None; }
 
                        let mut total_valid_points_tracked = 0;
                        for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
@@ -1976,33 +1994,10 @@ mod bucketed_history {
                        // If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme),
                        // treat it as if we were fully decayed.
                        const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;
-                       if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < FULLY_DECAYED.into() {
+                       if total_valid_points_tracked < FULLY_DECAYED.into() {
                                return None;
                        }
 
-                       Some((total_valid_points_tracked, required_decays))
-               }
-
-               #[inline]
-               pub(super) fn calculate_success_probability_times_billion<T: Time>(
-                       &self, now: T, last_updated: T, half_life: Duration,
-                       params: &ProbabilisticScoringFeeParameters, amount_msat: u64, capacity_msat: u64
-               ) -> Option<u64> {
-                       // If historical penalties are enabled, we try to calculate a probability of success
-                       // given our historical distribution of min- and max-liquidity bounds in a channel.
-                       // To do so, we walk the set of historical liquidity bucket (min, max) combinations
-                       // (where min_idx < max_idx, as having a minimum above our maximum is an invalid
-                       // state). For each pair, we calculate the probability as if the bucket's corresponding
-                       // min- and max- liquidity bounds were our current liquidity bounds and then multiply
-                       // that probability by the weight of the selected buckets.
-                       let payment_pos = amount_to_pos(amount_msat, capacity_msat);
-                       if payment_pos >= POSITION_TICKS { return None; }
-
-                       // Check if all our buckets are zero, once decayed and treat it as if we had no data. We
-                       // don't actually use the decayed buckets, though, as that would lose precision.
-                       let (total_valid_points_tracked, _)
-                               = self.get_total_valid_points(now, last_updated, half_life)?;
-
                        let mut cumulative_success_prob_times_billion = 0;
                        // Special-case the 0th min bucket - it generally means we failed a payment, so only
                        // consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
@@ -2061,7 +2056,7 @@ mod bucketed_history {
 }
 use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Writeable for ProbabilisticScorer<G, L> where L::Target: Logger {
        #[inline]
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                write_tlv_fields!(w, {
@@ -2071,8 +2066,8 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for Probab
        }
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
-ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref>
+ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorer<G, L> where L::Target: Logger {
        #[inline]
        fn read<R: Read>(
                r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
@@ -2091,24 +2086,24 @@ ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScore
        }
 }
 
-impl<T: Time> Writeable for ChannelLiquidity<T> {
+impl Writeable for ChannelLiquidity {
        #[inline]
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
-               let duration_since_epoch = T::duration_since_epoch() - self.last_updated.elapsed();
                write_tlv_fields!(w, {
                        (0, self.min_liquidity_offset_msat, required),
                        // 1 was the min_liquidity_offset_history in octile form
                        (2, self.max_liquidity_offset_msat, required),
                        // 3 was the max_liquidity_offset_history in octile form
-                       (4, duration_since_epoch, required),
+                       (4, self.last_updated, required),
                        (5, Some(self.min_liquidity_offset_history), option),
                        (7, Some(self.max_liquidity_offset_history), option),
+                       (9, self.offset_history_last_updated, required),
                });
                Ok(())
        }
 }
 
-impl<T: Time> Readable for ChannelLiquidity<T> {
+impl Readable for ChannelLiquidity {
        #[inline]
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let mut min_liquidity_offset_msat = 0;
@@ -2117,28 +2112,19 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
                let mut legacy_max_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
                let mut min_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
                let mut max_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
-               let mut duration_since_epoch = Duration::from_secs(0);
+               let mut last_updated = Duration::from_secs(0);
+               let mut offset_history_last_updated = None;
                read_tlv_fields!(r, {
                        (0, min_liquidity_offset_msat, required),
                        (1, legacy_min_liq_offset_history, option),
                        (2, max_liquidity_offset_msat, required),
                        (3, legacy_max_liq_offset_history, option),
-                       (4, duration_since_epoch, required),
+                       (4, last_updated, required),
                        (5, min_liquidity_offset_history, option),
                        (7, max_liquidity_offset_history, option),
+                       (9, offset_history_last_updated, option),
                });
-               // On rust prior to 1.60 `Instant::duration_since` will panic if time goes backwards.
-               // We write `last_updated` as wallclock time even though its ultimately an `Instant` (which
-               // is a time from a monotonic clock usually represented as an offset against boot time).
-               // Thus, we have to construct an `Instant` by subtracting the difference in wallclock time
-               // from the one that was written. However, because `Instant` can panic if we construct one
-               // in the future, we must handle wallclock time jumping backwards, which we do by simply
-               // using `Instant::now()` in that case.
-               let wall_clock_now = T::duration_since_epoch();
-               let now = T::now();
-               let last_updated = if wall_clock_now > duration_since_epoch {
-                       now - (wall_clock_now - duration_since_epoch)
-               } else { now };
+
                if min_liquidity_offset_history.is_none() {
                        if let Some(legacy_buckets) = legacy_min_liq_offset_history {
                                min_liquidity_offset_history = Some(legacy_buckets.into_current());
@@ -2159,22 +2145,21 @@ impl<T: Time> Readable for ChannelLiquidity<T> {
                        min_liquidity_offset_history: min_liquidity_offset_history.unwrap(),
                        max_liquidity_offset_history: max_liquidity_offset_history.unwrap(),
                        last_updated,
+                       offset_history_last_updated: offset_history_last_updated.unwrap_or(last_updated),
                })
        }
 }
 
 #[cfg(test)]
 mod tests {
-       use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorerUsingTime};
+       use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorer};
        use crate::blinded_path::{BlindedHop, BlindedPath};
        use crate::util::config::UserConfig;
-       use crate::util::time::Time;
-       use crate::util::time::tests::SinceEpoch;
 
        use crate::ln::channelmanager;
        use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
        use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
-       use crate::routing::router::{BlindedTail, Path, RouteHop};
+       use crate::routing::router::{BlindedTail, Path, RouteHop, CandidateRouteHop, PublicHopCandidate};
        use crate::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};
        use crate::util::ser::{ReadableArgs, Writeable};
        use crate::util::test_utils::{self, TestLogger};
@@ -2215,9 +2200,6 @@ mod tests {
 
        // `ProbabilisticScorer` tests
 
-       /// A probabilistic scorer for testing with time that can be manually advanced.
-       type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph<&'a TestLogger>, &'a TestLogger, SinceEpoch>;
-
        fn sender_privkey() -> SecretKey {
                SecretKey::from_slice(&[41; 32]).unwrap()
        }
@@ -2236,10 +2218,6 @@ mod tests {
                PublicKey::from_secret_key(&secp_ctx, &recipient_privkey())
        }
 
-       fn sender_node_id() -> NodeId {
-               NodeId::from_pubkey(&sender_pubkey())
-       }
-
        fn recipient_node_id() -> NodeId {
                NodeId::from_pubkey(&recipient_pubkey())
        }
@@ -2337,19 +2315,22 @@ mod tests {
        #[test]
        fn liquidity_bounds_directed_from_lowest_node_id() {
                let logger = TestLogger::new();
-               let last_updated = SinceEpoch::now();
+               let last_updated = Duration::ZERO;
+               let offset_history_last_updated = Duration::ZERO;
                let network_graph = network_graph(&logger);
                let decay_params = ProbabilisticScoringDecayParameters::default();
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
-                                       min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
+                                       min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
+                                       last_updated, offset_history_last_updated,
                                        min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                        max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                })
                        .with_channel(43,
                                ChannelLiquidity {
-                                       min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
+                                       min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
+                                       last_updated, offset_history_last_updated,
                                        min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                        max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                });
@@ -2362,52 +2343,52 @@ mod tests {
                // Update minimum liquidity.
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 100);
                assert_eq!(liquidity.max_liquidity_msat(), 300);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 700);
                assert_eq!(liquidity.max_liquidity_msat(), 900);
 
                scorer.channel_liquidities.get_mut(&42).unwrap()
-                       .as_directed_mut(&source, &target, 1_000, decay_params)
-                       .set_min_liquidity_msat(200);
+                       .as_directed_mut(&source, &target, 1_000)
+                       .set_min_liquidity_msat(200, Duration::ZERO);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 200);
                assert_eq!(liquidity.max_liquidity_msat(), 300);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 700);
                assert_eq!(liquidity.max_liquidity_msat(), 800);
 
                // Update maximum liquidity.
 
                let liquidity = scorer.channel_liquidities.get(&43).unwrap()
-                       .as_directed(&target, &recipient, 1_000, decay_params);
+                       .as_directed(&target, &recipient, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 700);
                assert_eq!(liquidity.max_liquidity_msat(), 900);
 
                let liquidity = scorer.channel_liquidities.get(&43).unwrap()
-                       .as_directed(&recipient, &target, 1_000, decay_params);
+                       .as_directed(&recipient, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 100);
                assert_eq!(liquidity.max_liquidity_msat(), 300);
 
                scorer.channel_liquidities.get_mut(&43).unwrap()
-                       .as_directed_mut(&target, &recipient, 1_000, decay_params)
-                       .set_max_liquidity_msat(200);
+                       .as_directed_mut(&target, &recipient, 1_000)
+                       .set_max_liquidity_msat(200, Duration::ZERO);
 
                let liquidity = scorer.channel_liquidities.get(&43).unwrap()
-                       .as_directed(&target, &recipient, 1_000, decay_params);
+                       .as_directed(&target, &recipient, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 0);
                assert_eq!(liquidity.max_liquidity_msat(), 200);
 
                let liquidity = scorer.channel_liquidities.get(&43).unwrap()
-                       .as_directed(&recipient, &target, 1_000, decay_params);
+                       .as_directed(&recipient, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 800);
                assert_eq!(liquidity.max_liquidity_msat(), 1000);
        }
@@ -2415,13 +2396,15 @@ mod tests {
        #[test]
        fn resets_liquidity_upper_bound_when_crossed_by_lower_bound() {
                let logger = TestLogger::new();
-               let last_updated = SinceEpoch::now();
+               let last_updated = Duration::ZERO;
+               let offset_history_last_updated = Duration::ZERO;
                let network_graph = network_graph(&logger);
                let decay_params = ProbabilisticScoringDecayParameters::default();
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
-                                       min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
+                                       min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
+                                       last_updated, offset_history_last_updated,
                                        min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                        max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                });
@@ -2431,42 +2414,42 @@ mod tests {
 
                // Check initial bounds.
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 400);
                assert_eq!(liquidity.max_liquidity_msat(), 800);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 200);
                assert_eq!(liquidity.max_liquidity_msat(), 600);
 
                // Reset from source to target.
                scorer.channel_liquidities.get_mut(&42).unwrap()
-                       .as_directed_mut(&source, &target, 1_000, decay_params)
-                       .set_min_liquidity_msat(900);
+                       .as_directed_mut(&source, &target, 1_000)
+                       .set_min_liquidity_msat(900, Duration::ZERO);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 900);
                assert_eq!(liquidity.max_liquidity_msat(), 1_000);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 0);
                assert_eq!(liquidity.max_liquidity_msat(), 100);
 
                // Reset from target to source.
                scorer.channel_liquidities.get_mut(&42).unwrap()
-                       .as_directed_mut(&target, &source, 1_000, decay_params)
-                       .set_min_liquidity_msat(400);
+                       .as_directed_mut(&target, &source, 1_000)
+                       .set_min_liquidity_msat(400, Duration::ZERO);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 0);
                assert_eq!(liquidity.max_liquidity_msat(), 600);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 400);
                assert_eq!(liquidity.max_liquidity_msat(), 1_000);
        }
@@ -2474,13 +2457,15 @@ mod tests {
        #[test]
        fn resets_liquidity_lower_bound_when_crossed_by_upper_bound() {
                let logger = TestLogger::new();
-               let last_updated = SinceEpoch::now();
+               let last_updated = Duration::ZERO;
+               let offset_history_last_updated = Duration::ZERO;
                let network_graph = network_graph(&logger);
                let decay_params = ProbabilisticScoringDecayParameters::default();
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
-                                       min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
+                                       min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
+                                       last_updated, offset_history_last_updated,
                                        min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                        max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                });
@@ -2490,42 +2475,42 @@ mod tests {
 
                // Check initial bounds.
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 400);
                assert_eq!(liquidity.max_liquidity_msat(), 800);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 200);
                assert_eq!(liquidity.max_liquidity_msat(), 600);
 
                // Reset from source to target.
                scorer.channel_liquidities.get_mut(&42).unwrap()
-                       .as_directed_mut(&source, &target, 1_000, decay_params)
-                       .set_max_liquidity_msat(300);
+                       .as_directed_mut(&source, &target, 1_000)
+                       .set_max_liquidity_msat(300, Duration::ZERO);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 0);
                assert_eq!(liquidity.max_liquidity_msat(), 300);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 700);
                assert_eq!(liquidity.max_liquidity_msat(), 1_000);
 
                // Reset from target to source.
                scorer.channel_liquidities.get_mut(&42).unwrap()
-                       .as_directed_mut(&target, &source, 1_000, decay_params)
-                       .set_max_liquidity_msat(600);
+                       .as_directed_mut(&target, &source, 1_000)
+                       .set_max_liquidity_msat(600, Duration::ZERO);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 400);
                assert_eq!(liquidity.max_liquidity_msat(), 1_000);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&target, &source, 1_000, decay_params);
+                       .as_directed(&target, &source, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 0);
                assert_eq!(liquidity.max_liquidity_msat(), 600);
        }
@@ -2541,45 +2526,52 @@ mod tests {
                let decay_params = ProbabilisticScoringDecayParameters::default();
                let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
 
                let usage = ChannelUsage {
                        amount_msat: 1_024,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               let network_graph = network_graph.read_only();
+               let channel = network_graph.channel(42).unwrap();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 10_240, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 102_400, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 47);
                let usage = ChannelUsage { amount_msat: 1_023_999, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2_000);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);
 
                let usage = ChannelUsage {
                        amount_msat: 128,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 58);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 58);
                let usage = ChannelUsage { amount_msat: 256, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 125);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 125);
                let usage = ChannelUsage { amount_msat: 374, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 198);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 198);
                let usage = ChannelUsage { amount_msat: 512, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
                let usage = ChannelUsage { amount_msat: 640, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 425);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 425);
                let usage = ChannelUsage { amount_msat: 768, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 602);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);
                let usage = ChannelUsage { amount_msat: 896, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 902);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 902);
        }
 
        #[test]
        fn constant_penalty_outside_liquidity_bounds() {
                let logger = TestLogger::new();
-               let last_updated = SinceEpoch::now();
+               let last_updated = Duration::ZERO;
+               let offset_history_last_updated = Duration::ZERO;
                let network_graph = network_graph(&logger);
                let params = ProbabilisticScoringFeeParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
@@ -2592,24 +2584,30 @@ mod tests {
                let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
-                                       min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated,
+                                       min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40,
+                                       last_updated, offset_history_last_updated,
                                        min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                        max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
                                });
                let source = source_node_id();
-               let target = target_node_id();
 
                let usage = ChannelUsage {
                        amount_msat: 39,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 100, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 50, ..usage };
-               assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
-               assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
+               assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
                let usage = ChannelUsage { amount_msat: 61, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
        }
 
        #[test]
@@ -2621,7 +2619,6 @@ mod tests {
                        ..ProbabilisticScoringFeeParameters::zero_penalty()
                };
                let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               let sender = sender_node_id();
                let source = source_node_id();
                let usage = ChannelUsage {
                        amount_msat: 500,
@@ -2630,14 +2627,20 @@ mod tests {
                };
                let failed_path = payment_path_for_amount(500);
                let successful_path = payment_path_for_amount(200);
+               let channel = &network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 41,
+               });
 
-               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, &params), 301);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
 
-               scorer.payment_path_failed(&failed_path, 41);
-               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, &params), 301);
+               scorer.payment_path_failed(&failed_path, 41, Duration::ZERO);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
 
-               scorer.payment_path_successful(&successful_path);
-               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, &params), 301);
+               scorer.payment_path_successful(&successful_path, Duration::ZERO);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
        }
 
        #[test]
@@ -2650,7 +2653,6 @@ mod tests {
                };
                let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
                let path = payment_path_for_amount(500);
 
                let usage = ChannelUsage {
@@ -2658,20 +2660,26 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 128);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
                let usage = ChannelUsage { amount_msat: 500, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 301);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
                let usage = ChannelUsage { amount_msat: 750, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 602);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);
 
-               scorer.payment_path_failed(&path, 43);
+               scorer.payment_path_failed(&path, 43, Duration::ZERO);
 
                let usage = ChannelUsage { amount_msat: 250, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 500, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 750, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
        }
 
        #[test]
@@ -2685,7 +2693,6 @@ mod tests {
                };
                let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
                let path = payment_path_for_amount(500);
 
                let usage = ChannelUsage {
@@ -2693,20 +2700,26 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 128);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
                let usage = ChannelUsage { amount_msat: 500, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 301);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
                let usage = ChannelUsage { amount_msat: 750, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 602);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);
 
-               scorer.payment_path_failed(&path, 42);
+               scorer.payment_path_failed(&path, 42, Duration::ZERO);
 
                let usage = ChannelUsage { amount_msat: 250, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
                let usage = ChannelUsage { amount_msat: 500, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
                let usage = ChannelUsage { amount_msat: 750, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
        }
 
        #[test]
@@ -2741,7 +2754,6 @@ mod tests {
                let node_a = NodeId::from_pubkey(&pub_a);
                let node_b = NodeId::from_pubkey(&pub_b);
                let node_c = NodeId::from_pubkey(&pub_c);
-               let node_d = NodeId::from_pubkey(&pub_d);
 
                let params = ProbabilisticScoringFeeParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
@@ -2754,17 +2766,53 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage, &params), 128);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&node_a).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
                // Note that a default liquidity bound is used for B -> C as no channel exists
-               assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage, &params), 128);
-               assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage, &params), 128);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&node_b).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 43,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
+               let channel = network_graph.read_only().channel(44).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&node_c).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 44,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
 
-               scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43);
+               scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43, Duration::ZERO);
 
-               assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage, &params), 80);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&node_a).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 80);
                // Note that a default liquidity bound is used for B -> C as no channel exists
-               assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage, &params), 128);
-               assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage, &params), 128);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&node_b).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 43,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
+               let channel = network_graph.read_only().channel(44).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&node_c).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 44,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
        }
 
        #[test]
@@ -2776,25 +2824,39 @@ mod tests {
                        ..ProbabilisticScoringFeeParameters::zero_penalty()
                };
                let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               let sender = sender_node_id();
                let source = source_node_id();
-               let target = target_node_id();
-               let recipient = recipient_node_id();
                let usage = ChannelUsage {
                        amount_msat: 250,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
+               let network_graph = network_graph.read_only().channels().clone();
+               let channel_42 = network_graph.get(&42).unwrap();
+               let channel_43 = network_graph.get(&43).unwrap();
+               let (info, _) = channel_42.as_directed_from(&source).unwrap();
+               let candidate_41 = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 41,
+               });
+               let (info, target) = channel_42.as_directed_from(&source).unwrap();
+               let candidate_42 = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               let (info, _) = channel_43.as_directed_from(&target).unwrap();
+               let candidate_43 = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 43,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate_41, usage, &params), 128);
+               assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, &params), 128);
+               assert_eq!(scorer.channel_penalty_msat(&candidate_43, usage, &params), 128);
 
-               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, &params), 128);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 128);
-               assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage, &params), 128);
-
-               scorer.payment_path_successful(&payment_path_for_amount(500));
+               scorer.payment_path_successful(&payment_path_for_amount(500), Duration::ZERO);
 
-               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, &params), 128);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
-               assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage, &params), 300);
+               assert_eq!(scorer.channel_penalty_msat(&candidate_41, usage, &params), 128);
+               assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, &params), 300);
+               assert_eq!(scorer.channel_penalty_msat(&candidate_43, usage, &params), 300);
        }
 
        #[test]
@@ -2812,120 +2874,80 @@ mod tests {
                };
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
 
                let usage = ChannelUsage {
                        amount_msat: 0,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 1_023, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2_000);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);
 
-               scorer.payment_path_failed(&payment_path_for_amount(768), 42);
-               scorer.payment_path_failed(&payment_path_for_amount(128), 43);
+               scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
+               scorer.payment_path_failed(&payment_path_for_amount(128), 43, Duration::ZERO);
 
                // Initial penalties
                let usage = ChannelUsage { amount_msat: 128, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 256, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 93);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 93);
                let usage = ChannelUsage { amount_msat: 768, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1_479);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 1_479);
                let usage = ChannelUsage { amount_msat: 896, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
-
-               // No decay
-               SinceEpoch::advance(Duration::from_secs(4));
-               let usage = ChannelUsage { amount_msat: 128, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
-               let usage = ChannelUsage { amount_msat: 256, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 93);
-               let usage = ChannelUsage { amount_msat: 768, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1_479);
-               let usage = ChannelUsage { amount_msat: 896, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
 
                // Half decay (i.e., three-quarter life)
-               SinceEpoch::advance(Duration::from_secs(1));
+               scorer.time_passed(Duration::from_secs(5));
                let usage = ChannelUsage { amount_msat: 128, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 22);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 22);
                let usage = ChannelUsage { amount_msat: 256, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 106);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 106);
                let usage = ChannelUsage { amount_msat: 768, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 921);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 921);
                let usage = ChannelUsage { amount_msat: 896, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
 
                // One decay (i.e., half life)
-               SinceEpoch::advance(Duration::from_secs(5));
+               scorer.time_passed(Duration::from_secs(10));
                let usage = ChannelUsage { amount_msat: 64, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 128, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 34);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 34);
                let usage = ChannelUsage { amount_msat: 896, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1_970);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 1_970);
                let usage = ChannelUsage { amount_msat: 960, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
 
                // Fully decay liquidity lower bound.
-               SinceEpoch::advance(Duration::from_secs(10 * 7));
+               scorer.time_passed(Duration::from_secs(10 * 8));
                let usage = ChannelUsage { amount_msat: 0, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 1, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 1_023, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2_000);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);
                let usage = ChannelUsage { amount_msat: 1_024, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
 
                // Fully decay liquidity upper bound.
-               SinceEpoch::advance(Duration::from_secs(10));
+               scorer.time_passed(Duration::from_secs(10 * 9));
                let usage = ChannelUsage { amount_msat: 0, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 1_024, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
 
-               SinceEpoch::advance(Duration::from_secs(10));
+               scorer.time_passed(Duration::from_secs(10 * 10));
                let usage = ChannelUsage { amount_msat: 0, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
                let usage = ChannelUsage { amount_msat: 1_024, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
-       }
-
-       #[test]
-       fn decays_liquidity_bounds_without_shift_overflow() {
-               let logger = TestLogger::new();
-               let network_graph = network_graph(&logger);
-               let params = ProbabilisticScoringFeeParameters {
-                       liquidity_penalty_multiplier_msat: 1_000,
-                       ..ProbabilisticScoringFeeParameters::zero_penalty()
-               };
-               let decay_params = ProbabilisticScoringDecayParameters {
-                       liquidity_offset_half_life: Duration::from_secs(10),
-                       ..ProbabilisticScoringDecayParameters::default()
-               };
-               let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
-               let source = source_node_id();
-               let target = target_node_id();
-               let usage = ChannelUsage {
-                       amount_msat: 256,
-                       inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
-               };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 125);
-
-               scorer.payment_path_failed(&payment_path_for_amount(512), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 281);
-
-               // An unchecked right shift 64 bits or more in DirectedChannelLiquidity::decayed_offset_msat
-               // would cause an overflow.
-               SinceEpoch::advance(Duration::from_secs(10 * 64));
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 125);
-
-               SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 125);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
        }
 
        #[test]
@@ -2942,37 +2964,42 @@ mod tests {
                };
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
                let usage = ChannelUsage {
                        amount_msat: 512,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
 
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
 
                // More knowledge gives higher confidence (256, 768), meaning a lower penalty.
-               scorer.payment_path_failed(&payment_path_for_amount(768), 42);
-               scorer.payment_path_failed(&payment_path_for_amount(256), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 281);
+               scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
+               scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::ZERO);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 281);
 
                // Decaying knowledge gives less confidence (128, 896), meaning a higher penalty.
-               SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 291);
+               scorer.time_passed(Duration::from_secs(10));
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 291);
 
                // Reducing the upper bound gives more confidence (128, 832) that the payment amount (512)
                // is closer to the upper bound, meaning a higher penalty.
-               scorer.payment_path_successful(&payment_path_for_amount(64));
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 331);
+               scorer.payment_path_successful(&payment_path_for_amount(64), Duration::from_secs(10));
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 331);
 
                // Increasing the lower bound gives more confidence (256, 832) that the payment amount (512)
                // is closer to the lower bound, meaning a lower penalty.
-               scorer.payment_path_failed(&payment_path_for_amount(256), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 245);
+               scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::from_secs(10));
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 245);
 
                // Further decaying affects the lower bound more than the upper bound (128, 928).
-               SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 280);
+               scorer.time_passed(Duration::from_secs(20));
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 280);
        }
 
        #[test]
@@ -2990,33 +3017,37 @@ mod tests {
                };
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
                let usage = ChannelUsage {
                        amount_msat: 500,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
 
-               scorer.payment_path_failed(&payment_path_for_amount(500), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
 
-               SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 473);
+               scorer.time_passed(Duration::from_secs(10));
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 473);
 
-               scorer.payment_path_failed(&payment_path_for_amount(250), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
 
                let mut serialized_scorer = Vec::new();
                scorer.write(&mut serialized_scorer).unwrap();
 
                let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
                let deserialized_scorer =
-                       <ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+                       <ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+               assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, &params), 300);
        }
 
-       #[test]
-       fn decays_persisted_liquidity_bounds() {
+       fn do_decays_persisted_liquidity_bounds(decay_before_reload: bool) {
                let logger = TestLogger::new();
                let network_graph = network_graph(&logger);
                let params = ProbabilisticScoringFeeParameters {
@@ -3030,31 +3061,48 @@ mod tests {
                };
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
                let usage = ChannelUsage {
                        amount_msat: 500,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
 
-               scorer.payment_path_failed(&payment_path_for_amount(500), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
+
+               if decay_before_reload {
+                       scorer.time_passed(Duration::from_secs(10));
+               }
 
                let mut serialized_scorer = Vec::new();
                scorer.write(&mut serialized_scorer).unwrap();
 
-               SinceEpoch::advance(Duration::from_secs(10));
-
                let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
-               let deserialized_scorer =
-                       <ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 473);
+               let mut deserialized_scorer =
+                       <ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+               if !decay_before_reload {
+                       scorer.time_passed(Duration::from_secs(10));
+                       deserialized_scorer.time_passed(Duration::from_secs(10));
+               }
+               assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, &params), 473);
 
-               scorer.payment_path_failed(&payment_path_for_amount(250), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
 
-               SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 370);
+               deserialized_scorer.time_passed(Duration::from_secs(20));
+               assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, &params), 370);
+       }
+
+       #[test]
+       fn decays_persisted_liquidity_bounds() {
+               do_decays_persisted_liquidity_bounds(false);
+               do_decays_persisted_liquidity_bounds(true);
        }
 
        #[test]
@@ -3066,54 +3114,59 @@ mod tests {
                let params = ProbabilisticScoringFeeParameters::default();
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
 
                let usage = ChannelUsage {
                        amount_msat: 100_000_000,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 11497);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 11497);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 7408);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 7408);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 6151);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 6151);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 5427);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 5427);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4955);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4955);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4736);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4736);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4484);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4484);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4484);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4484);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4263);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4263);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4263);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4263);
                let usage = ChannelUsage {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4044);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 4044);
        }
 
        #[test]
@@ -3121,7 +3174,6 @@ mod tests {
                let logger = TestLogger::new();
                let network_graph = network_graph(&logger);
                let source = source_node_id();
-               let target = target_node_id();
                let usage = ChannelUsage {
                        amount_msat: 128,
                        inflight_htlc_msat: 0,
@@ -3133,14 +3185,20 @@ mod tests {
                        ..ProbabilisticScoringFeeParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 58);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 58);
 
                let params = ProbabilisticScoringFeeParameters {
                        base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
                        anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 558);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 558);
 
                let params = ProbabilisticScoringFeeParameters {
                        base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
@@ -3149,7 +3207,7 @@ mod tests {
                };
 
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 558 + 128);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 558 + 128);
        }
 
        #[test]
@@ -3157,7 +3215,6 @@ mod tests {
                let logger = TestLogger::new();
                let network_graph = network_graph(&logger);
                let source = source_node_id();
-               let target = target_node_id();
                let usage = ChannelUsage {
                        amount_msat: 512_000,
                        inflight_htlc_msat: 0,
@@ -3170,7 +3227,13 @@ mod tests {
                        ..ProbabilisticScoringFeeParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
 
                let params = ProbabilisticScoringFeeParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
@@ -3178,7 +3241,7 @@ mod tests {
                        ..ProbabilisticScoringFeeParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 337);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 337);
        }
 
        #[test]
@@ -3186,20 +3249,24 @@ mod tests {
                let logger = TestLogger::new();
                let network_graph = network_graph(&logger);
                let source = source_node_id();
-               let target = target_node_id();
                let usage = ChannelUsage {
                        amount_msat: u64::max_value(),
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Infinite,
                };
-
                let params = ProbabilisticScoringFeeParameters {
                        liquidity_penalty_multiplier_msat: 40_000,
                        ..ProbabilisticScoringFeeParameters::zero_penalty()
                };
                let decay_params = ProbabilisticScoringDecayParameters::zero_penalty();
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
                let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 80_000);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 80_000);
        }
 
        #[test]
@@ -3212,17 +3279,23 @@ mod tests {
                };
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
 
                let usage = ChannelUsage {
                        amount_msat: 750,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
                };
-               assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               let network_graph = network_graph.read_only();
+               let channel = network_graph.channel(42).unwrap();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
 
                let usage = ChannelUsage { inflight_htlc_msat: 251, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
        }
 
        #[test]
@@ -3232,7 +3305,6 @@ mod tests {
                let params = ProbabilisticScoringFeeParameters::default();
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
 
                let base_penalty_msat = params.base_penalty_msat;
                let usage = ChannelUsage {
@@ -3240,13 +3312,20 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::ExactLiquidity { liquidity_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), base_penalty_msat);
+               let network_graph = network_graph.read_only();
+               let channel = network_graph.channel(42).unwrap();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), base_penalty_msat);
 
                let usage = ChannelUsage { amount_msat: 1_000, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), base_penalty_msat);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), base_penalty_msat);
 
                let usage = ChannelUsage { amount_msat: 1_001, ..usage };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
        }
 
        #[test]
@@ -3277,16 +3356,36 @@ mod tests {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
 
-               // With no historical data the normal liquidity penalty calculation is used.
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 168);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                               info,
+                               short_channel_id: 42,
+                       });
+
+                       // With no historical data the normal liquidity penalty calculation is used.
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+               }
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-                       None);
+               None);
                assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, &params),
-                       None);
-
-               scorer.payment_path_failed(&payment_path_for_amount(1), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage_1, &params), 249);
+               None);
+
+               scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::ZERO);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                               info,
+                               short_channel_id: 42,
+                       });
+
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage_1, &params), 249);
+               }
                // The "it failed" increment is 32, where the probability should lie several buckets into
                // the first octile.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
@@ -3299,8 +3398,18 @@ mod tests {
 
                // Even after we tell the scorer we definitely have enough available liquidity, it will
                // still remember that there was some failure in the past, and assign a non-0 penalty.
-               scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 105);
+               scorer.payment_path_failed(&payment_path_for_amount(1000), 43, Duration::ZERO);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                               info,
+                               short_channel_id: 42,
+                       });
+
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 105);
+               }
                // The first points should be decayed just slightly and the last bucket has a new point.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
@@ -3319,33 +3428,56 @@ mod tests {
 
                // Advance the time forward 16 half-lives (which the docs claim will ensure all data is
                // gone), and check that we're back to where we started.
-               SinceEpoch::advance(Duration::from_secs(10 * 16));
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 168);
+               scorer.time_passed(Duration::from_secs(10 * 16));
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                               info,
+                               short_channel_id: 42,
+                       });
+
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+               }
                // Once fully decayed we still have data, but its all-0s. In the future we may remove the
                // data entirely instead.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
-                       None);
+                       Some(([0; 32], [0; 32])));
                assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1, &params), None);
 
-               let mut usage = ChannelUsage {
+               let usage = ChannelUsage {
                        amount_msat: 100,
                        inflight_htlc_msat: 1024,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
-               scorer.payment_path_failed(&payment_path_for_amount(1), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2050);
-               usage.inflight_htlc_msat = 0;
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 866);
-
-               let usage = ChannelUsage {
-                       amount_msat: 1,
-                       inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
-               };
-               assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage, &params), 2048);
+               scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::from_secs(10 * 16));
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                               info,
+                               short_channel_id: 42,
+                       });
+
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2050);
+
+                       let usage = ChannelUsage {
+                               amount_msat: 1,
+                               inflight_htlc_msat: 0,
+                               effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
+                       };
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+               }
 
                // Advance to decay all liquidity offsets to zero.
-               SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
+               scorer.time_passed(Duration::from_secs(10 * (16 + 60 * 60)));
+
+               // Once even the bounds have decayed information about the channel should be removed
+               // entirely.
+               assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+                       None);
 
                // Use a path in the opposite direction, which have zero for htlc_maximum_msat. This will
                // ensure that the effective capacity is zero to test division-by-zero edge cases.
@@ -3354,7 +3486,7 @@ mod tests {
                        path_hop(source_pubkey(), 42, 1),
                        path_hop(sender_pubkey(), 41, 0),
                ];
-               scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 42);
+               scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 42, Duration::from_secs(10 * (16 + 60 * 60)));
        }
 
        #[test]
@@ -3362,7 +3494,6 @@ mod tests {
                let logger = TestLogger::new();
                let network_graph = network_graph(&logger);
                let source = source_node_id();
-               let target = target_node_id();
                let params = ProbabilisticScoringFeeParameters {
                        anti_probing_penalty_msat: 500,
                        ..ProbabilisticScoringFeeParameters::zero_penalty()
@@ -3375,7 +3506,14 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               let network_graph = network_graph.read_only();
+               let channel = network_graph.channel(42).unwrap();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
 
                // Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity.
                let usage = ChannelUsage {
@@ -3383,7 +3521,7 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_024_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 500);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 500);
 
                // Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity/2.
                let usage = ChannelUsage {
@@ -3391,7 +3529,7 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 512_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 500);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 500);
 
                // Check we receive no anti-probing penalty for htlc_maximum_msat == channel_capacity/2 - 1.
                let usage = ChannelUsage {
@@ -3399,7 +3537,7 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 511_999 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
        }
 
        #[test]
@@ -3414,13 +3552,18 @@ mod tests {
                let decay_params = ProbabilisticScoringDecayParameters::default();
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
                let usage = ChannelUsage {
                        amount_msat: 512,
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, target) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
 
                let mut path = payment_path_for_amount(768);
                let recipient_hop = path.hops.pop().unwrap();
@@ -3442,12 +3585,12 @@ mod tests {
                // final value is taken into account.
                assert!(scorer.channel_liquidities.get(&42).is_none());
 
-               scorer.payment_path_failed(&path, 42);
+               scorer.payment_path_failed(&path, 42, Duration::ZERO);
                path.blinded_tail.as_mut().unwrap().final_value_msat = 256;
-               scorer.payment_path_failed(&path, 43);
+               scorer.payment_path_failed(&path, 43, Duration::ZERO);
 
                let liquidity = scorer.channel_liquidities.get(&42).unwrap()
-                       .as_directed(&source, &target, 1_000, decay_params);
+                       .as_directed(&source, &target, 1_000);
                assert_eq!(liquidity.min_liquidity_msat(), 256);
                assert_eq!(liquidity.max_liquidity_msat(), 768);
        }
@@ -3476,7 +3619,6 @@ mod tests {
 
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
-               let target = target_node_id();
 
                let mut amount_msat = 10_000_000;
                let usage = ChannelUsage {
@@ -3484,19 +3626,25 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: capacity_msat },
                };
+               let channel = network_graph.read_only().channel(42).unwrap().to_owned();
+               let (info, target) = channel.as_directed_from(&source).unwrap();
+               let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                       info,
+                       short_channel_id: 42,
+               });
                // With no historical data the normal liquidity penalty calculation is used, which results
                // in a success probability of ~75%.
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1269);
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 1269);
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        None);
                assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, &params),
                        None);
 
                // Fail to pay once, and then check the buckets and penalty.
-               scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+               scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
                // The penalty should be the maximum penalty, as the payment we're scoring is now in the
                // same bucket which is the only maximum datapoint.
-               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params),
+               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params),
                        2048 + 2048 * amount_msat / super::AMOUNT_PENALTY_DIVISOR);
                // The "it failed" increment is 32, which we should apply to the first upper-bound (between
                // 6k sats and 12k sats).
@@ -3517,7 +3665,7 @@ mod tests {
                // ...but once we see a failure, we consider the payment to be substantially less likely,
                // even though not a probability of zero as we still look at the second max bucket which
                // now shows 31.
-               scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+               scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
@@ -3525,3 +3673,61 @@ mod tests {
                        Some(0.0));
        }
 }
+
+#[cfg(ldk_bench)]
+pub mod benches {
+       use super::*;
+       use criterion::Criterion;
+       use crate::routing::router::{bench_utils, RouteHop};
+       use crate::util::test_utils::TestLogger;
+       use crate::ln::features::{ChannelFeatures, NodeFeatures};
+
+       pub fn decay_100k_channel_bounds(bench: &mut Criterion) {
+               let logger = TestLogger::new();
+               let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+               let mut scorer = ProbabilisticScorer::new(Default::default(), &network_graph, &logger);
+               // Score a number of random channels
+               let mut seed: u64 = 0xdeadbeef;
+               for _ in 0..100_000 {
+                       seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+                       let (victim, victim_dst, amt) = {
+                               let rong = network_graph.read_only();
+                               let channels = rong.channels();
+                               let chan = channels.unordered_iter()
+                                       .skip((seed as usize) % channels.len())
+                                       .next().unwrap();
+                               seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+                               let amt = seed % chan.1.capacity_sats.map(|c| c * 1000)
+                                       .or(chan.1.one_to_two.as_ref().map(|info| info.htlc_maximum_msat))
+                                       .or(chan.1.two_to_one.as_ref().map(|info| info.htlc_maximum_msat))
+                                       .unwrap_or(1_000_000_000).saturating_add(1);
+                               (*chan.0, chan.1.node_two, amt)
+                       };
+                       let path = Path {
+                               hops: vec![RouteHop {
+                                       pubkey: victim_dst.as_pubkey().unwrap(),
+                                       node_features: NodeFeatures::empty(),
+                                       short_channel_id: victim,
+                                       channel_features: ChannelFeatures::empty(),
+                                       fee_msat: amt,
+                                       cltv_expiry_delta: 42,
+                                       maybe_announced_channel: true,
+                               }],
+                               blinded_tail: None
+                       };
+                       seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+                       if seed % 2 == 0 { // alternate outcomes on seed parity (was `% 1`, which is always 0)
+                               scorer.probe_failed(&path, victim, Duration::ZERO);
+                       } else {
+                               scorer.probe_successful(&path, Duration::ZERO);
+                       }
+               }
+               let mut cur_time = Duration::ZERO;
+               cur_time += Duration::from_millis(1); // one warm-up tick outside the measured loop
+               scorer.time_passed(cur_time);
+               bench.bench_function("decay_100k_channel_bounds", |b| b.iter(|| {
+                       cur_time += Duration::from_millis(1);
+                       scorer.time_passed(cur_time);
+               }));
+       }
+}
diff --git a/lightning/src/sign/ecdsa.rs b/lightning/src/sign/ecdsa.rs
new file mode 100644 (file)
index 0000000..2e98213
--- /dev/null
@@ -0,0 +1,170 @@
+//! Defines ECDSA-specific signer types.
+
+use bitcoin::blockdata::transaction::Transaction;
+
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+use bitcoin::secp256k1::ecdsa::Signature;
+use bitcoin::secp256k1;
+
+use crate::util::ser::Writeable;
+use crate::ln::PaymentPreimage;
+use crate::ln::chan_utils::{HTLCOutputInCommitment, HolderCommitmentTransaction, CommitmentTransaction, ClosingTransaction};
+use crate::ln::msgs::UnsignedChannelAnnouncement;
+
+use crate::prelude::*;
+use crate::sign::{ChannelSigner, HTLCDescriptor};
+
+/// A trait to sign Lightning channel transactions as described in
+/// [BOLT 3](https://github.com/lightning/bolts/blob/master/03-transactions.md).
+///
+/// Signing services could be implemented on a hardware wallet and should implement signing
+/// policies in order to be secure. Please refer to the [VLS Policy
+/// Controls](https://gitlab.com/lightning-signer/validating-lightning-signer/-/blob/main/docs/policy-controls.md)
+/// for an example of such policies.
+pub trait EcdsaChannelSigner: ChannelSigner {
+       /// Create a signature for a counterparty's commitment transaction and associated HTLC transactions.
+       ///
+       /// Note that if signing fails or is rejected, the channel will be force-closed.
+       ///
+       /// Policy checks should be implemented in this function, including checking the amount
+       /// sent to us and checking the HTLCs.
+       ///
+       /// The preimages of outbound and inbound HTLCs that were fulfilled since the last commitment
+       /// are provided. A validating signer should ensure that an outbound HTLC output is removed
+       /// only when the matching preimage is provided and after the corresponding inbound HTLC has
+       /// been removed for forwarded payments.
+       ///
+       /// Note that all the relevant preimages will be provided, but there may also be additional
+       /// irrelevant or duplicate preimages.
+       //
+       // TODO: Document the things someone using this interface should enforce before signing.
+       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction,
+               inbound_htlc_preimages: Vec<PaymentPreimage>,
+               outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<(Signature, Vec<Signature>), ()>;
+       /// Creates a signature for a holder's commitment transaction.
+       ///
+       /// This will be called
+       /// - with a non-revoked `commitment_tx`.
+       /// - with the latest `commitment_tx` when we initiate a force-close.
+       ///
+       /// This may be called multiple times for the same transaction.
+       ///
+       /// An external signer implementation should check that the commitment has not been revoked.
+       //
+       // TODO: Document the things someone using this interface should enforce before signing.
+       fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+       /// Same as [`sign_holder_commitment`], but exists only for tests to get access to holder
+       /// commitment transactions which will be broadcasted later, after the channel has moved on to a
+       /// newer state. Thus, needs its own method as [`sign_holder_commitment`] may enforce that we
+       /// only ever get called once.
+       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
+       fn unsafe_sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+       /// Create a signature for the given input in a transaction spending an HTLC transaction output
+       /// or a commitment transaction `to_local` output when our counterparty broadcasts an old state.
+       ///
+       /// A justice transaction may claim multiple outputs at the same time if timelocks are
+       /// similar, but only a signature for the input at index `input` should be signed for here.
+       /// It may be called multiple times for same output(s) if a fee-bump is needed with regards
+       /// to an upcoming timelock expiration.
+       ///
+       /// Amount is value of the output spent by this input, committed to in the BIP 143 signature.
+       ///
+       /// `per_commitment_key` is revocation secret which was provided by our counterparty when they
+       /// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
+       /// not allow the spending of any funds by itself (you need our holder `revocation_secret` to do
+       /// so).
+       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64,
+               per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()>;
+       /// Create a signature for the given input in a transaction spending a commitment transaction
+       /// HTLC output when our counterparty broadcasts an old state.
+       ///
+       /// A justice transaction may claim multiple outputs at the same time if timelocks are
+       /// similar, but only a signature for the input at index `input` should be signed for here.
+       /// It may be called multiple times for same output(s) if a fee-bump is needed with regards
+       /// to an upcoming timelock expiration.
+       ///
+       /// `amount` is the value of the output spent by this input, committed to in the BIP 143
+       /// signature.
+       ///
+       /// `per_commitment_key` is revocation secret which was provided by our counterparty when they
+       /// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
+       /// not allow the spending of any funds by itself (you need our holder revocation_secret to do
+       /// so).
+       ///
+       /// `htlc` holds HTLC elements (hash, timelock), thus changing the format of the witness script
+       /// (which is committed to in the BIP 143 signatures).
+       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64,
+               per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+       /// Computes the signature for a commitment transaction's HTLC output used as an input within
+       /// `htlc_tx`, which spends the commitment transaction at index `input`. The signature returned
+       /// must be computed using [`EcdsaSighashType::All`].
+       ///
+       /// Note that this may be called for HTLCs in the penultimate commitment transaction if a
+       /// [`ChannelMonitor`] [replica](https://github.com/lightningdevkit/rust-lightning/blob/main/GLOSSARY.md#monitor-replicas)
+       /// broadcasts it before receiving the update for the latest commitment transaction.
+       ///
+       /// [`EcdsaSighashType::All`]: bitcoin::sighash::EcdsaSighashType::All
+       /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
+       fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize,
+               htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()>;
+       /// Create a signature for a claiming transaction for a HTLC output on a counterparty's commitment
+       /// transaction, either offered or received.
+       ///
+       /// Such a transaction may claim multiples offered outputs at same time if we know the
+       /// preimage for each when we create it, but only the input at index `input` should be
+       /// signed for here. It may be called multiple times for same output(s) if a fee-bump is
+       /// needed with regards to an upcoming timelock expiration.
+       ///
+       /// `witness_script` is either an offered or received script as defined in BOLT3 for HTLC
+       /// outputs.
+       ///
+       /// `amount` is value of the output spent by this input, committed to in the BIP 143 signature.
+       ///
+       /// `per_commitment_point` is the dynamic point corresponding to the channel state
+       /// detected onchain. It has been generated by our counterparty and is used to derive
+       /// channel state keys, which are then included in the witness script and committed to in the
+       /// BIP 143 signature.
+       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64,
+               per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+       /// Create a signature for a (proposed) closing transaction.
+       ///
+       /// Note that, due to rounding, there may be one "missing" satoshi, and either party may have
+       /// chosen to forgo their output as dust.
+       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+       /// Computes the signature for a commitment transaction's anchor output used as an
+       /// input within `anchor_tx`, which spends the commitment transaction, at index `input`.
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()>;
+       /// Signs a channel announcement message with our funding key proving it comes from one of the
+       /// channel participants.
+       ///
+       /// Channel announcements also require a signature from each node's network key. Our node
+       /// signature is computed through [`NodeSigner::sign_gossip_message`].
+       ///
+       /// Note that if this fails or is rejected, the channel will not be publicly announced and
+       /// our counterparty may (though likely will not) close the channel on us for violating the
+       /// protocol.
+       ///
+       /// [`NodeSigner::sign_gossip_message`]: crate::sign::NodeSigner::sign_gossip_message
+       fn sign_channel_announcement_with_funding_key(
+               &self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()>;
+}
+
+/// A writeable signer.
+///
+/// There will always be two instances of a signer per channel, one occupied by the
+/// [`ChannelManager`] and another by the channel's [`ChannelMonitor`].
+///
+/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
+pub trait WriteableEcdsaChannelSigner: EcdsaChannelSigner + Writeable {}
index c4d4bc002f4c476fb4864a40d8b72d26f4be4a21..4e418f049bbc9867a7a2a7920706e17c135ed23e 100644 (file)
@@ -29,6 +29,8 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::WPubkeyHash;
 
+#[cfg(taproot)]
+use bitcoin::secp256k1::All;
 use bitcoin::secp256k1::{KeyPair, PublicKey, Scalar, Secp256k1, SecretKey, Signing};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
@@ -44,6 +46,8 @@ use crate::ln::{chan_utils, PaymentPreimage};
 use crate::ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, HolderCommitmentTransaction, ChannelTransactionParameters, CommitmentTransaction, ClosingTransaction};
 use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcKey, HtlcBasepoint, RevocationKey, RevocationBasepoint};
 use crate::ln::msgs::{UnsignedChannelAnnouncement, UnsignedGossipMessage};
+#[cfg(taproot)]
+use crate::ln::msgs::PartialSignatureWithNonce;
 use crate::ln::script::ShutdownScript;
 use crate::offers::invoice::UnsignedBolt12Invoice;
 use crate::offers::invoice_request::UnsignedInvoiceRequest;
@@ -52,15 +56,24 @@ use crate::prelude::*;
 use core::convert::TryInto;
 use core::ops::Deref;
 use core::sync::atomic::{AtomicUsize, Ordering};
+#[cfg(taproot)]
+use musig2::types::{PartialSignature, PublicNonce};
 use crate::io::{self, Error};
 use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
+use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
+#[cfg(taproot)]
+use crate::sign::taproot::TaprootChannelSigner;
 use crate::util::atomic_counter::AtomicCounter;
 use crate::util::chacha20::ChaCha20;
 use crate::util::invoice::construct_invoice_preimage;
 
 pub(crate) mod type_resolver;
 
+pub mod ecdsa;
+#[cfg(taproot)]
+pub mod taproot;
+
 /// Used as initial key material, to be expanded into multiple secret keys (but not to be used
 /// directly). This is used within LDK to encrypt/decrypt inbound payment data.
 ///
@@ -202,6 +215,15 @@ pub enum SpendableOutputDescriptor {
                outpoint: OutPoint,
                /// The output which is referenced by the given outpoint.
                output: TxOut,
+               /// The `channel_keys_id` for the channel which this output came from.
+               ///
+               /// For channels which were generated on LDK 0.0.119 or later, this is the value which was
+               /// passed to the [`SignerProvider::get_destination_script`] call which provided this
+               /// output script.
+               ///
+               /// For channels which were generated prior to LDK 0.0.119, no such argument existed,
+               /// however this field may still be filled in if such data is available.
+               channel_keys_id: Option<[u8; 32]>
        },
        /// An output to a P2WSH script which can be spent with a single signature after an `OP_CSV`
        /// delay.
@@ -265,6 +287,7 @@ pub enum SpendableOutputDescriptor {
 impl_writeable_tlv_based_enum!(SpendableOutputDescriptor,
        (0, StaticOutput) => {
                (0, outpoint, required),
+               (1, channel_keys_id, option),
                (2, output, required),
        },
 ;
@@ -365,7 +388,7 @@ impl SpendableOutputDescriptor {
                                        { witness_weight -= 1; } // Guarantees a low R signature
                                        input_value += descriptor.output.value;
                                },
-                               SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
+                               SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output, .. } => {
                                        if !output_set.insert(*outpoint) { return Err(()); }
                                        input.push(TxIn {
                                                previous_output: outpoint.into_bitcoin_outpoint(),
@@ -534,7 +557,7 @@ impl HTLCDescriptor {
        /// Derives the channel signer required to sign the HTLC input.
        pub fn derive_channel_signer<S: WriteableEcdsaChannelSigner, SP: Deref>(&self, signer_provider: &SP) -> S
        where
-               SP::Target: SignerProvider<Signer = S>
+               SP::Target: SignerProvider<EcdsaSigner = S>
        {
                let mut signer = signer_provider.derive_channel_signer(
                        self.channel_derivation_parameters.value_satoshis,
@@ -571,14 +594,20 @@ pub trait ChannelSigner {
        /// Policy checks should be implemented in this function, including checking the amount
        /// sent to us and checking the HTLCs.
        ///
-       /// The preimages of outgoing HTLCs that were fulfilled since the last commitment are provided.
+       /// The preimages of outbound HTLCs that were fulfilled since the last commitment are provided.
        /// A validating signer should ensure that an HTLC output is removed only when the matching
        /// preimage is provided, or when the value to holder is restored.
        ///
        /// Note that all the relevant preimages will be provided, but there may also be additional
        /// irrelevant or duplicate preimages.
        fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction,
-               preimages: Vec<PaymentPreimage>) -> Result<(), ()>;
+               outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()>;
+
+       /// Validate the counterparty's revocation.
+       ///
+       /// This is required in order for the signer to make sure that the state has moved
+       /// forward and it is safe to sign the next counterparty commitment.
+       fn validate_counterparty_revocation(&self, idx: u64, secret: &SecretKey) -> Result<(), ()>;
 
        /// Returns the holder's channel public keys and basepoints.
        fn pubkeys(&self) -> &ChannelPublicKeys;
@@ -600,161 +629,6 @@ pub trait ChannelSigner {
        fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters);
 }
 
-/// A trait to sign Lightning channel transactions as described in
-/// [BOLT 3](https://github.com/lightning/bolts/blob/master/03-transactions.md).
-///
-/// Signing services could be implemented on a hardware wallet and should implement signing
-/// policies in order to be secure. Please refer to the [VLS Policy
-/// Controls](https://gitlab.com/lightning-signer/validating-lightning-signer/-/blob/main/docs/policy-controls.md)
-/// for an example of such policies.
-pub trait EcdsaChannelSigner: ChannelSigner {
-       /// Create a signature for a counterparty's commitment transaction and associated HTLC transactions.
-       ///
-       /// Note that if signing fails or is rejected, the channel will be force-closed.
-       ///
-       /// Policy checks should be implemented in this function, including checking the amount
-       /// sent to us and checking the HTLCs.
-       ///
-       /// The preimages of outgoing HTLCs that were fulfilled since the last commitment are provided.
-       /// A validating signer should ensure that an HTLC output is removed only when the matching
-       /// preimage is provided, or when the value to holder is restored.
-       ///
-       /// Note that all the relevant preimages will be provided, but there may also be additional
-       /// irrelevant or duplicate preimages.
-       //
-       // TODO: Document the things someone using this interface should enforce before signing.
-       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction,
-               preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<(Signature, Vec<Signature>), ()>;
-       /// Validate the counterparty's revocation.
-       ///
-       /// This is required in order for the signer to make sure that the state has moved
-       /// forward and it is safe to sign the next counterparty commitment.
-       fn validate_counterparty_revocation(&self, idx: u64, secret: &SecretKey) -> Result<(), ()>;
-       /// Creates a signature for a holder's commitment transaction.
-       ///
-       /// This will be called
-       /// - with a non-revoked `commitment_tx`.
-       /// - with the latest `commitment_tx` when we initiate a force-close.
-       ///
-       /// This may be called multiple times for the same transaction.
-       ///
-       /// An external signer implementation should check that the commitment has not been revoked.
-       //
-       // TODO: Document the things someone using this interface should enforce before signing.
-       fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
-               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
-       /// Same as [`sign_holder_commitment`], but exists only for tests to get access to holder
-       /// commitment transactions which will be broadcasted later, after the channel has moved on to a
-       /// newer state. Thus, needs its own method as [`sign_holder_commitment`] may enforce that we
-       /// only ever get called once.
-       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
-       fn unsafe_sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
-               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
-       /// Create a signature for the given input in a transaction spending an HTLC transaction output
-       /// or a commitment transaction `to_local` output when our counterparty broadcasts an old state.
-       ///
-       /// A justice transaction may claim multiple outputs at the same time if timelocks are
-       /// similar, but only a signature for the input at index `input` should be signed for here.
-       /// It may be called multiple times for same output(s) if a fee-bump is needed with regards
-       /// to an upcoming timelock expiration.
-       ///
-       /// Amount is value of the output spent by this input, committed to in the BIP 143 signature.
-       ///
-       /// `per_commitment_key` is revocation secret which was provided by our counterparty when they
-       /// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
-       /// not allow the spending of any funds by itself (you need our holder `revocation_secret` to do
-       /// so).
-       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64,
-               per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()>;
-       /// Create a signature for the given input in a transaction spending a commitment transaction
-       /// HTLC output when our counterparty broadcasts an old state.
-       ///
-       /// A justice transaction may claim multiple outputs at the same time if timelocks are
-       /// similar, but only a signature for the input at index `input` should be signed for here.
-       /// It may be called multiple times for same output(s) if a fee-bump is needed with regards
-       /// to an upcoming timelock expiration.
-       ///
-       /// `amount` is the value of the output spent by this input, committed to in the BIP 143
-       /// signature.
-       ///
-       /// `per_commitment_key` is revocation secret which was provided by our counterparty when they
-       /// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
-       /// not allow the spending of any funds by itself (you need our holder revocation_secret to do
-       /// so).
-       ///
-       /// `htlc` holds HTLC elements (hash, timelock), thus changing the format of the witness script
-       /// (which is committed to in the BIP 143 signatures).
-       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64,
-               per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment,
-               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
-       /// Computes the signature for a commitment transaction's HTLC output used as an input within
-       /// `htlc_tx`, which spends the commitment transaction at index `input`. The signature returned
-       /// must be be computed using [`EcdsaSighashType::All`].
-       ///
-       /// Note that this may be called for HTLCs in the penultimate commitment transaction if a
-       /// [`ChannelMonitor`] [replica](https://github.com/lightningdevkit/rust-lightning/blob/main/GLOSSARY.md#monitor-replicas)
-       /// broadcasts it before receiving the update for the latest commitment transaction.
-       ///
-       /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
-       fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize,
-               htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()>;
-       /// Create a signature for a claiming transaction for a HTLC output on a counterparty's commitment
-       /// transaction, either offered or received.
-       ///
-       /// Such a transaction may claim multiples offered outputs at same time if we know the
-       /// preimage for each when we create it, but only the input at index `input` should be
-       /// signed for here. It may be called multiple times for same output(s) if a fee-bump is
-       /// needed with regards to an upcoming timelock expiration.
-       ///
-       /// `witness_script` is either an offered or received script as defined in BOLT3 for HTLC
-       /// outputs.
-       ///
-       /// `amount` is value of the output spent by this input, committed to in the BIP 143 signature.
-       ///
-       /// `per_commitment_point` is the dynamic point corresponding to the channel state
-       /// detected onchain. It has been generated by our counterparty and is used to derive
-       /// channel state keys, which are then included in the witness script and committed to in the
-       /// BIP 143 signature.
-       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64,
-               per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment,
-               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
-       /// Create a signature for a (proposed) closing transaction.
-       ///
-       /// Note that, due to rounding, there may be one "missing" satoshi, and either party may have
-       /// chosen to forgo their output as dust.
-       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction,
-               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
-       /// Computes the signature for a commitment transaction's anchor output used as an
-       /// input within `anchor_tx`, which spends the commitment transaction, at index `input`.
-       fn sign_holder_anchor_input(
-               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
-       ) -> Result<Signature, ()>;
-       /// Signs a channel announcement message with our funding key proving it comes from one of the
-       /// channel participants.
-       ///
-       /// Channel announcements also require a signature from each node's network key. Our node
-       /// signature is computed through [`NodeSigner::sign_gossip_message`].
-       ///
-       /// Note that if this fails or is rejected, the channel will not be publicly announced and
-       /// our counterparty may (though likely will not) close the channel on us for violating the
-       /// protocol.
-       fn sign_channel_announcement_with_funding_key(
-               &self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()>;
-}
-
-/// A writeable signer.
-///
-/// There will always be two instances of a signer per channel, one occupied by the
-/// [`ChannelManager`] and another by the channel's [`ChannelMonitor`].
-///
-/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
-pub trait WriteableEcdsaChannelSigner: EcdsaChannelSigner + Writeable {}
-
 /// Specifies the recipient of an invoice.
 ///
 /// This indicates to [`NodeSigner::sign_invoice`] what node secret key should be used to sign
@@ -865,9 +739,12 @@ pub trait NodeSigner {
 /// A trait that can return signer instances for individual channels.
 pub trait SignerProvider {
        /// A type which implements [`WriteableEcdsaChannelSigner`] which will be returned by [`Self::derive_channel_signer`].
-       type Signer : WriteableEcdsaChannelSigner;
+       type EcdsaSigner: WriteableEcdsaChannelSigner;
+       #[cfg(taproot)]
+       /// A type which implements [`TaprootChannelSigner`]
+       type TaprootSigner: TaprootChannelSigner;
 
-       /// Generates a unique `channel_keys_id` that can be used to obtain a [`Self::Signer`] through
+       /// Generates a unique `channel_keys_id` that can be used to obtain a [`Self::EcdsaSigner`] through
        /// [`SignerProvider::derive_channel_signer`]. The `user_channel_id` is provided to allow
        /// implementations of [`SignerProvider`] to maintain a mapping between itself and the generated
        /// `channel_keys_id`.
@@ -881,7 +758,7 @@ pub trait SignerProvider {
        /// [`SignerProvider::generate_channel_keys_id`]. Otherwise, an existing `Signer` can be
        /// re-derived from its `channel_keys_id`, which can be obtained through its trait method
        /// [`ChannelSigner::channel_keys_id`].
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::Signer;
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner;
 
        /// Reads a [`Signer`] for this [`SignerProvider`] from the given input stream.
        /// This is only called during deserialization of other objects which contain
@@ -893,18 +770,19 @@ pub trait SignerProvider {
        /// This method is slowly being phased out -- it will only be called when reading objects
        /// written by LDK versions prior to 0.0.113.
        ///
-       /// [`Signer`]: Self::Signer
+       /// [`Signer`]: Self::EcdsaSigner
        /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-       fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, DecodeError>;
+       fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::EcdsaSigner, DecodeError>;
 
        /// Get a script pubkey which we send funds to when claiming on-chain contestable outputs.
        ///
        /// If this function returns an error, this will result in a channel failing to open.
        ///
        /// This method should return a different value each time it is called, to avoid linking
-       /// on-chain funds across channels as controlled to the same user.
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()>;
+       /// on-chain funds across channels as controlled to the same user. `channel_keys_id` may be
+       /// used to derive a unique value for each channel.
+       fn get_destination_script(&self, channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()>;
 
        /// Get a script pubkey which we will send funds to when closing a channel.
        ///
@@ -1208,7 +1086,11 @@ impl ChannelSigner for InMemorySigner {
                chan_utils::build_commitment_secret(&self.commitment_seed, idx)
        }
 
-       fn validate_holder_commitment(&self, _holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
+       fn validate_holder_commitment(&self, _holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
+               Ok(())
+       }
+
+       fn validate_counterparty_revocation(&self, _idx: u64, _secret: &SecretKey) -> Result<(), ()> {
                Ok(())
        }
 
@@ -1230,7 +1112,7 @@ impl ChannelSigner for InMemorySigner {
 const MISSING_PARAMS_ERR: &'static str = "ChannelSigner::provide_channel_parameters must be called before signing operations";
 
 impl EcdsaChannelSigner for InMemorySigner {
-       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, _preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, _inbound_htlc_preimages: Vec<PaymentPreimage>, _outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
                let trusted_tx = commitment_tx.trust();
                let keys = trusted_tx.keys();
 
@@ -1259,10 +1141,6 @@ impl EcdsaChannelSigner for InMemorySigner {
                Ok((commitment_sig, htlc_sigs))
        }
 
-       fn validate_counterparty_revocation(&self, _idx: u64, _secret: &SecretKey) -> Result<(), ()> {
-               Ok(())
-       }
-
        fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
                let funding_pubkey = PublicKey::from_secret_key(secp_ctx, &self.funding_key);
                let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
@@ -1376,6 +1254,45 @@ impl EcdsaChannelSigner for InMemorySigner {
        }
 }
 
+#[cfg(taproot)]
+impl TaprootChannelSigner for InMemorySigner {
+       fn generate_local_nonce_pair(&self, commitment_number: u64, secp_ctx: &Secp256k1<All>) -> PublicNonce {
+               todo!()
+       }
+
+       fn partially_sign_counterparty_commitment(&self, counterparty_nonce: PublicNonce, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec<PaymentPreimage>, outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<All>) -> Result<(PartialSignatureWithNonce, Vec<schnorr::Signature>), ()> {
+               todo!()
+       }
+
+       fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+               todo!()
+       }
+
+       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn partially_sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+               todo!()
+       }
+
+       fn sign_holder_anchor_input(&self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+               todo!()
+       }
+}
+
 const SERIALIZATION_VERSION: u8 = 1;
 
 const MIN_SERIALIZATION_VERSION: u8 = 1;
@@ -1639,7 +1556,7 @@ impl KeysManager {
                                        let witness = keys_cache.as_ref().unwrap().0.sign_dynamic_p2wsh_input(&psbt.unsigned_tx, input_idx, &descriptor, &secp_ctx)?;
                                        psbt.inputs[input_idx].final_script_witness = Some(witness);
                                },
-                               SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
+                               SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output, .. } => {
                                        let input_idx = psbt.unsigned_tx.input.iter().position(|i| i.previous_output == outpoint.into_bitcoin_outpoint()).ok_or(())?;
                                        let derivation_idx = if output.script_pubkey == self.destination_script {
                                                1
@@ -1778,7 +1695,9 @@ impl NodeSigner for KeysManager {
 }
 
 impl SignerProvider for KeysManager {
-       type Signer = InMemorySigner;
+       type EcdsaSigner = InMemorySigner;
+       #[cfg(taproot)]
+       type TaprootSigner = InMemorySigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
                let child_idx = self.channel_child_index.fetch_add(1, Ordering::AcqRel);
@@ -1796,15 +1715,15 @@ impl SignerProvider for KeysManager {
                id
        }
 
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::Signer {
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
                self.derive_channel_keys(channel_value_satoshis, &channel_keys_id)
        }
 
-       fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, DecodeError> {
+       fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> {
                InMemorySigner::read(&mut io::Cursor::new(reader), self)
        }
 
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()> {
+       fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
                Ok(self.destination_script.clone())
        }
 
@@ -1897,22 +1816,24 @@ impl NodeSigner for PhantomKeysManager {
 }
 
 impl SignerProvider for PhantomKeysManager {
-       type Signer = InMemorySigner;
+       type EcdsaSigner = InMemorySigner;
+       #[cfg(taproot)]
+       type TaprootSigner = InMemorySigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
                self.inner.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id)
        }
 
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::Signer {
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
                self.inner.derive_channel_signer(channel_value_satoshis, channel_keys_id)
        }
 
-       fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, DecodeError> {
+       fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> {
                self.inner.read_chan_signer(reader)
        }
 
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()> {
-               self.inner.get_destination_script()
+       fn get_destination_script(&self, channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
+               self.inner.get_destination_script(channel_keys_id)
        }
 
        fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
diff --git a/lightning/src/sign/taproot.rs b/lightning/src/sign/taproot.rs
new file mode 100644 (file)
index 0000000..230383f
--- /dev/null
@@ -0,0 +1,151 @@
+//! Defines a Taproot-specific signer type.
+
+use alloc::vec::Vec;
+use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::secp256k1;
+use bitcoin::secp256k1::{PublicKey, schnorr::Signature, Secp256k1, SecretKey};
+
+use musig2::types::{PartialSignature, PublicNonce};
+
+use crate::ln::chan_utils::{ClosingTransaction, CommitmentTransaction, HolderCommitmentTransaction, HTLCOutputInCommitment};
+use crate::ln::msgs::PartialSignatureWithNonce;
+use crate::ln::PaymentPreimage;
+use crate::sign::{ChannelSigner, HTLCDescriptor};
+
+/// A Taproot-specific signer type that defines signing-related methods that are either unique to
+/// Taproot or have argument or return types that differ from the ones an ECDSA signer would be
+/// expected to have.
+pub trait TaprootChannelSigner: ChannelSigner {
+       /// Generate a local nonce pair, which requires committing to ahead of time.
+       /// The counterparty needs the public nonce generated herein to compute a partial signature.
+       fn generate_local_nonce_pair(&self, commitment_number: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicNonce;
+
+       /// Create a signature for a counterparty's commitment transaction and associated HTLC transactions.
+       ///
+       /// Note that if signing fails or is rejected, the channel will be force-closed.
+       ///
+       /// Policy checks should be implemented in this function, including checking the amount
+       /// sent to us and checking the HTLCs.
+       ///
+       /// The preimages of outbound and inbound HTLCs that were fulfilled since the last commitment
+       /// are provided. A validating signer should ensure that an outbound HTLC output is removed
+       /// only when the matching preimage is provided and after the corresponding inbound HTLC has
+       /// been removed for forwarded payments.
+       ///
+       /// Note that all the relevant preimages will be provided, but there may also be additional
+       /// irrelevant or duplicate preimages.
+       //
+       // TODO: Document the things someone using this interface should enforce before signing.
+       fn partially_sign_counterparty_commitment(&self, counterparty_nonce: PublicNonce,
+               commitment_tx: &CommitmentTransaction,
+               inbound_htlc_preimages: Vec<PaymentPreimage>,
+               outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<(PartialSignatureWithNonce, Vec<Signature>), ()>;
+
+       /// Creates a signature for a holder's commitment transaction.
+       ///
+       /// This will be called
+       /// - with a non-revoked `commitment_tx`.
+       /// - with the latest `commitment_tx` when we initiate a force-close.
+       ///
+       /// This may be called multiple times for the same transaction.
+       ///
+       /// An external signer implementation should check that the commitment has not been revoked.
+       ///
+       // TODO: Document the things someone using this interface should enforce before signing.
+       fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
+               counterparty_partial_signature: PartialSignatureWithNonce,
+               secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<PartialSignature, ()>;
+
+       /// Create a signature for the given input in a transaction spending an HTLC transaction output
+       /// or a commitment transaction `to_local` output when our counterparty broadcasts an old state.
+       ///
+       /// A justice transaction may claim multiple outputs at the same time if timelocks are
+       /// similar, but only a signature for the input at index `input` should be signed for here.
+       /// It may be called multiple times for same output(s) if a fee-bump is needed with regards
+       /// to an upcoming timelock expiration.
+       ///
+       /// `amount` is the value of the output spent by this input, committed to in the BIP 341 signature.
+       ///
+       /// `per_commitment_key` is the revocation secret which was provided by our counterparty when they
+       /// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
+       /// not allow the spending of any funds by itself (you need our holder `revocation_secret` to do
+       /// so).
+       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64,
+               per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()>;
+
+       /// Create a signature for the given input in a transaction spending a commitment transaction
+       /// HTLC output when our counterparty broadcasts an old state.
+       ///
+       /// A justice transaction may claim multiple outputs at the same time if timelocks are
+       /// similar, but only a signature for the input at index `input` should be signed for here.
+       /// It may be called multiple times for same output(s) if a fee-bump is needed with regards
+       /// to an upcoming timelock expiration.
+       ///
+       /// `amount` is the value of the output spent by this input, committed to in the BIP 341
+       /// signature.
+       ///
+       /// `per_commitment_key` is the revocation secret which was provided by our counterparty when they
+       /// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
+       /// not allow the spending of any funds by itself (you need our holder revocation_secret to do
+       /// so).
+       ///
+       /// `htlc` holds HTLC elements (hash, timelock), thus changing the format of the witness script
+       /// (which is committed to in the BIP 341 signatures).
+       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64,
+               per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+
+       /// Computes the signature for a commitment transaction's HTLC output used as an input within
+       /// `htlc_tx`, which spends the commitment transaction at index `input`. The signature returned
+       /// must be computed using [`TapSighashType::Default`].
+       ///
+       /// Note that this may be called for HTLCs in the penultimate commitment transaction if a
+       /// [`ChannelMonitor`] [replica](https://github.com/lightningdevkit/rust-lightning/blob/main/GLOSSARY.md#monitor-replicas)
+       /// broadcasts it before receiving the update for the latest commitment transaction.
+       ///
+       ///
+       /// [`TapSighashType::Default`]: bitcoin::sighash::TapSighashType::Default
+       /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
+       fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize,
+               htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()>;
+
+       /// Create a signature for a claiming transaction for a HTLC output on a counterparty's commitment
+       /// transaction, either offered or received.
+       ///
+       /// Such a transaction may claim multiple offered outputs at the same time if we know the
+       /// preimage for each when we create it, but only the input at index `input` should be
+       /// signed for here. It may be called multiple times for same output(s) if a fee-bump is
+       /// needed with regards to an upcoming timelock expiration.
+       ///
+       /// `witness_script` is either an offered or received script as defined in BOLT3 for HTLC
+       /// outputs.
+       ///
+       /// `amount` is the value of the output spent by this input, committed to in the BIP 341 signature.
+       ///
+       /// `per_commitment_point` is the dynamic point corresponding to the channel state
+       /// detected onchain. It has been generated by our counterparty and is used to derive
+       /// channel state keys, which are then included in the witness script and committed to in the
+       /// BIP 341 signature.
+       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64,
+               per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+
+       /// Create a signature for a (proposed) closing transaction.
+       ///
+       /// Note that, due to rounding, there may be one "missing" satoshi, and either party may have
+       /// chosen to forgo their output as dust.
+       fn partially_sign_closing_transaction(&self, closing_tx: &ClosingTransaction,
+               secp_ctx: &Secp256k1<secp256k1::All>) -> Result<PartialSignature, ()>;
+
+       /// Computes the signature for a commitment transaction's anchor output used as an
+       /// input within `anchor_tx`, which spends the commitment transaction, at index `input`.
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()>;
+
+       // TODO: sign channel announcement
+}
index f76650982c2b4f2ae8e4126975d56728b2e4e261..2a122da34470332e1147a427de86c864abe4adab 100644 (file)
@@ -1,34 +1,43 @@
-use crate::sign::{ChannelSigner, EcdsaChannelSigner};
+use core::ops::Deref;
+use crate::sign::{ChannelSigner, SignerProvider};
 
-pub(crate) enum ChannelSignerType<ECS: EcdsaChannelSigner> {
+pub(crate) enum ChannelSignerType<SP: Deref> where SP::Target: SignerProvider {
        // in practice, this will only ever be an EcdsaChannelSigner (specifically, Writeable)
-       Ecdsa(ECS)
+       Ecdsa(<SP::Target as SignerProvider>::EcdsaSigner),
+       #[cfg(taproot)]
+       Taproot(<SP::Target as SignerProvider>::TaprootSigner),
 }
 
-impl<ECS: EcdsaChannelSigner> ChannelSignerType<ECS>{
+impl<SP: Deref> ChannelSignerType<SP> where SP::Target: SignerProvider {
        pub(crate) fn as_ref(&self) -> &dyn ChannelSigner {
                match self {
-                       ChannelSignerType::Ecdsa(ecs) => ecs
+                       ChannelSignerType::Ecdsa(ecs) => ecs,
+                       #[cfg(taproot)]
+                       ChannelSignerType::Taproot(tcs) => tcs,
                }
        }
 
        pub(crate) fn as_mut(&mut self) -> &mut dyn ChannelSigner {
                match self {
-                       ChannelSignerType::Ecdsa(ecs) => ecs
+                       ChannelSignerType::Ecdsa(ecs) => ecs,
+                       #[cfg(taproot)]
+                       ChannelSignerType::Taproot(tcs) => tcs,
                }
        }
 
        #[allow(unused)]
-       pub(crate) fn as_ecdsa(&self) -> Option<&ECS> {
+       pub(crate) fn as_ecdsa(&self) -> Option<&<SP::Target as SignerProvider>::EcdsaSigner> {
                match self {
-                       ChannelSignerType::Ecdsa(ecs) => Some(ecs)
+                       ChannelSignerType::Ecdsa(ecs) => Some(ecs),
+                       _ => None
                }
        }
 
        #[allow(unused)]
-       pub(crate) fn as_mut_ecdsa(&mut self) -> Option<&mut ECS> {
+       pub(crate) fn as_mut_ecdsa(&mut self) -> Option<&mut <SP::Target as SignerProvider>::EcdsaSigner> {
                match self {
-                       ChannelSignerType::Ecdsa(ecs) => Some(ecs)
+                       ChannelSignerType::Ecdsa(ecs) => Some(ecs),
+                       _ => None
                }
        }
 }
index 27cfb9b8f782c4bafabecd5180a5d0d2256105c3..0f92bd6caa3ad7c914b304db24d5fa69a8456b85 100644 (file)
@@ -37,10 +37,6 @@ impl<T> Mutex<T> {
                Ok(MutexGuard { lock: self.inner.borrow_mut() })
        }
 
-       pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
-               Ok(MutexGuard { lock: self.inner.borrow_mut() })
-       }
-
        pub fn into_inner(self) -> LockResult<T> {
                Ok(self.inner.into_inner())
        }
index f46b344f2ce144c235e40e041743e707570e43a5..865a09fa0401f2d705cf989e9c16d346e7e7f964 100644 (file)
@@ -21,6 +21,7 @@ mod real_chacha {
        struct u32x4(pub u32, pub u32, pub u32, pub u32);
        impl ::core::ops::Add for u32x4 {
                type Output = u32x4;
+               #[inline]
                fn add(self, rhs: u32x4) -> u32x4 {
                        u32x4(self.0.wrapping_add(rhs.0),
                              self.1.wrapping_add(rhs.1),
@@ -30,6 +31,7 @@ mod real_chacha {
        }
        impl ::core::ops::Sub for u32x4 {
                type Output = u32x4;
+               #[inline]
                fn sub(self, rhs: u32x4) -> u32x4 {
                        u32x4(self.0.wrapping_sub(rhs.0),
                              self.1.wrapping_sub(rhs.1),
@@ -39,23 +41,27 @@ mod real_chacha {
        }
        impl ::core::ops::BitXor for u32x4 {
                type Output = u32x4;
+               #[inline]
                fn bitxor(self, rhs: u32x4) -> u32x4 {
                        u32x4(self.0 ^ rhs.0, self.1 ^ rhs.1, self.2 ^ rhs.2, self.3 ^ rhs.3)
                }
        }
-       impl ::core::ops::Shr<u32x4> for u32x4 {
+       impl ::core::ops::Shr<u8> for u32x4 {
                type Output = u32x4;
-               fn shr(self, rhs: u32x4) -> u32x4 {
-                       u32x4(self.0 >> rhs.0, self.1 >> rhs.1, self.2 >> rhs.2, self.3 >> rhs.3)
+               #[inline]
+               fn shr(self, shr: u8) -> u32x4 {
+                       u32x4(self.0 >> shr, self.1 >> shr, self.2 >> shr, self.3 >> shr)
                }
        }
-       impl ::core::ops::Shl<u32x4> for u32x4 {
+       impl ::core::ops::Shl<u8> for u32x4 {
                type Output = u32x4;
-               fn shl(self, rhs: u32x4) -> u32x4 {
-                       u32x4(self.0 << rhs.0, self.1 << rhs.1, self.2 << rhs.2, self.3 << rhs.3)
+               #[inline]
+               fn shl(self, shl: u8) -> u32x4 {
+                       u32x4(self.0 << shl, self.1 << shl, self.2 << shl, self.3 << shl)
                }
        }
        impl u32x4 {
+               #[inline]
                fn from_bytes(bytes: &[u8]) -> Self {
                        assert_eq!(bytes.len(), 4*4);
                        Self (
@@ -118,31 +124,25 @@ mod real_chacha {
        macro_rules! round{
                ($state: expr) => {{
                        $state.a = $state.a + $state.b;
-                       rotate!($state.d, $state.a, S16);
+                       rotate!($state.d, $state.a, 16);
                        $state.c = $state.c + $state.d;
-                       rotate!($state.b, $state.c, S12);
+                       rotate!($state.b, $state.c, 12);
                        $state.a = $state.a + $state.b;
-                       rotate!($state.d, $state.a, S8);
+                       rotate!($state.d, $state.a, 8);
                        $state.c = $state.c + $state.d;
-                       rotate!($state.b, $state.c, S7);
+                       rotate!($state.b, $state.c, 7);
                }}
        }
 
        macro_rules! rotate {
-               ($a: expr, $b: expr, $c:expr) => {{
+               ($a: expr, $b: expr, $rot: expr) => {{
                        let v = $a ^ $b;
-                       let r = S32 - $c;
+                       let r = 32 - $rot;
                        let right = v >> r;
-                       $a = (v << $c) ^ right
+                       $a = (v << $rot) ^ right
                }}
        }
 
-       const S32:u32x4 = u32x4(32, 32, 32, 32);
-       const S16:u32x4 = u32x4(16, 16, 16, 16);
-       const S12:u32x4 = u32x4(12, 12, 12, 12);
-       const S8:u32x4 = u32x4(8, 8, 8, 8);
-       const S7:u32x4 = u32x4(7, 7, 7, 7);
-
        impl ChaCha20 {
                pub fn new(key: &[u8], nonce: &[u8]) -> ChaCha20 {
                        assert!(key.len() == 16 || key.len() == 32);
index bb5903b2c8d166783c8b6a275441282e9e7d1b27..98963c7c2bd4fd823c614e25b50dd1785d8dbab2 100644 (file)
@@ -64,6 +64,7 @@ pub fn sign<C: Signing>(ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey) -> Si
 }
 
 #[inline]
+#[allow(unused_variables)]
 pub fn sign_with_aux_rand<C: Signing, ES: Deref>(
        ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey, entropy_source: &ES
 ) -> Signature where ES::Target: EntropySource {
index dbca9b785e85dfbaf3c68253e7e47b91225c6bdc..92ea8ffed55a91c218c4481e01a83f924536585d 100644 (file)
@@ -18,7 +18,9 @@ use bitcoin::secp256k1::PublicKey;
 
 use core::cmp;
 use core::fmt;
+use core::ops::Deref;
 
+use crate::ln::ChannelId;
 #[cfg(c_bindings)]
 use crate::prelude::*; // Needed for String
 
@@ -95,6 +97,15 @@ impl Level {
 pub struct Record<'a> {
        /// The verbosity level of the message.
        pub level: Level,
+       /// The node id of the peer pertaining to the logged record.
+       ///
+       /// Note that in some cases a [`Self::channel_id`] may be filled in but this may still be
+       /// `None`, depending on if the peer information is readily available in LDK when the log is
+       /// generated.
+       pub peer_id: Option<PublicKey>,
+       /// The channel id of the channel pertaining to the logged record. May be a temporary id before
+       /// the channel has been funded.
+       pub channel_id: Option<ChannelId>,
        #[cfg(not(c_bindings))]
        /// The message body.
        pub args: fmt::Arguments<'a>,
@@ -119,9 +130,14 @@ impl<'a> Record<'a> {
        ///
        /// This is not exported to bindings users as fmt can't be used in C
        #[inline]
-       pub fn new(level: Level, args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32) -> Record<'a> {
+       pub fn new(
+               level: Level, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>,
+               args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32
+       ) -> Record<'a> {
                Record {
                        level,
+                       peer_id,
+                       channel_id,
                        #[cfg(not(c_bindings))]
                        args,
                        #[cfg(c_bindings)]
@@ -135,10 +151,43 @@ impl<'a> Record<'a> {
        }
 }
 
-/// A trait encapsulating the operations required of a logger
+/// A trait encapsulating the operations required of a logger.
 pub trait Logger {
-       /// Logs the `Record`
-       fn log(&self, record: &Record);
+       /// Logs the [`Record`].
+       fn log(&self, record: Record);
+}
+
+/// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`].
+pub struct WithContext<'a, L: Deref> where L::Target: Logger {
+       /// The logger to delegate to after adding context to the record.
+       logger: &'a L,
+       /// The node id of the peer pertaining to the logged record.
+       peer_id: Option<PublicKey>,
+       /// The channel id of the channel pertaining to the logged record.
+       channel_id: Option<ChannelId>,
+}
+
+impl<'a, L: Deref> Logger for WithContext<'a, L> where L::Target: Logger {
+       fn log(&self, mut record: Record) {
+               if self.peer_id.is_some() {
+                       record.peer_id = self.peer_id
+               };
+               if self.channel_id.is_some() {
+                       record.channel_id = self.channel_id;
+               }
+               self.logger.log(record)
+       }
+}
+
+impl<'a, L: Deref> WithContext<'a, L> where L::Target: Logger {
+       /// Wraps the given logger, providing additional context to any logged records.
+       pub fn from(logger: &'a L, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>) -> Self {
+               WithContext {
+                       logger,
+                       peer_id,
+                       channel_id,
+               }
+       }
 }
 
 /// Wrapper for logging a [`PublicKey`] in hex format.
@@ -191,7 +240,9 @@ impl<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone> fmt::Display fo
 
 #[cfg(test)]
 mod tests {
-       use crate::util::logger::{Logger, Level};
+       use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1};
+       use crate::ln::ChannelId;
+       use crate::util::logger::{Logger, Level, WithContext};
        use crate::util::test_utils::TestLogger;
        use crate::sync::Arc;
 
@@ -203,11 +254,11 @@ mod tests {
        }
 
        struct WrapperLog {
-               logger: Arc<Logger>
+               logger: Arc<dyn Logger>
        }
 
        impl WrapperLog {
-               fn new(logger: Arc<Logger>) -> WrapperLog {
+               fn new(logger: Arc<dyn Logger>) -> WrapperLog {
                        WrapperLog {
                                logger,
                        }
@@ -227,11 +278,46 @@ mod tests {
        fn test_logging_macros() {
                let mut logger = TestLogger::new();
                logger.enable(Level::Gossip);
-               let logger : Arc<Logger> = Arc::new(logger);
+               let logger : Arc<dyn Logger> = Arc::new(logger);
                let wrapper = WrapperLog::new(Arc::clone(&logger));
                wrapper.call_macros();
        }
 
+       #[test]
+       fn test_logging_with_context() {
+               let logger = &TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let context_logger = WithContext::from(&logger, Some(pk), Some(ChannelId([0; 32])));
+               log_error!(context_logger, "This is an error");
+               log_warn!(context_logger, "This is an error");
+               log_debug!(context_logger, "This is an error");
+               log_trace!(context_logger, "This is an error");
+               log_gossip!(context_logger, "This is an error");
+               log_info!(context_logger, "This is an error");
+               logger.assert_log_context_contains(
+                       "lightning::util::logger::tests", Some(pk), Some(ChannelId([0;32])), 6
+               );
+       }
+
+       #[test]
+       fn test_logging_with_multiple_wrapped_context() {
+               let logger = &TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let context_logger = &WithContext::from(&logger, None, Some(ChannelId([0; 32])));
+               let full_context_logger = WithContext::from(&context_logger, Some(pk), None);
+               log_error!(full_context_logger, "This is an error");
+               log_warn!(full_context_logger, "This is an error");
+               log_debug!(full_context_logger, "This is an error");
+               log_trace!(full_context_logger, "This is an error");
+               log_gossip!(full_context_logger, "This is an error");
+               log_info!(full_context_logger, "This is an error");
+               logger.assert_log_context_contains(
+                       "lightning::util::logger::tests", Some(pk), Some(ChannelId([0;32])), 6
+               );
+       }
+
        #[test]
        fn test_log_ordering() {
                assert!(Level::Error > Level::Warn);
index 4836b4d6814f705cb111515a983733c4566e3a34..203c544e0096385c959d124f051bdc15295e958e 100644 (file)
@@ -159,7 +159,7 @@ macro_rules! log_spendable {
 #[macro_export]
 macro_rules! log_internal {
        ($logger: expr, $lvl:expr, $($arg:tt)+) => (
-               $logger.log(&$crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!()))
+               $logger.log($crate::util::logger::Record::new($lvl, None, None, format_args!($($arg)+), module_path!(), file!(), line!()))
        );
 }
 
index 9928597fb8a2b10b112b25ca4bef1e70e065dc03..e63290620516ee0b6f62b7bb57c0c6d2b9369d49 100644 (file)
@@ -21,7 +21,7 @@ use crate::prelude::*;
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
-use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
+use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
 use crate::ln::channelmanager::ChannelManager;
@@ -132,7 +132,7 @@ pub trait KVStore {
 
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
-       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
                NS::Target: 'static + NodeSigner,
@@ -153,7 +153,7 @@ pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F:
 
 
 impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
-       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
                NS::Target: 'static + NodeSigner,
@@ -221,7 +221,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
 /// Read previously persisted [`ChannelMonitor`]s from the store.
 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
        kv_store: K, entropy_source: ES, signer_provider: SP,
-) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
 where
        K::Target: KVStore,
        ES::Target: EntropySource + Sized,
@@ -246,7 +246,7 @@ where
                        io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
                })?;
 
-               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
                        &mut io::Cursor::new(
                                kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
                        (&*entropy_source, &*signer_provider),
@@ -334,9 +334,9 @@ where
 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
 /// list channel monitors themselves and load channels individually using
 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
-/// 
+///
 /// ## EXTREMELY IMPORTANT
-/// 
+///
 /// It is extremely important that your [`KVStore::read`] implementation uses the
 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
 /// that circumstance (not when there is really a permissions error, for example). This is because
@@ -346,9 +346,10 @@ where
 ///
 /// # Pruning stale channel updates
 ///
-/// Stale updates are pruned when a full monitor is written. The old monitor is first read, and if
-/// that succeeds, updates in the range between the old and new monitors are deleted. The `lazy`
-/// flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
+/// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`.
+/// Monitor updates in the range between the latest `update_id` and `update_id - maximum_pending_updates`
+/// are deleted.
+/// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
 /// will complete. However, stale updates are not a problem for data integrity, since updates are
 /// only read that are higher than the stored [`ChannelMonitor`]'s `update_id`.
 ///
@@ -385,7 +386,7 @@ where
        /// consolidation will frequently occur with fewer updates than what you set here; this number
        /// is merely the maximum that may be stored. When setting this value, consider that for higher
        /// values of `maximum_pending_updates`:
-       /// 
+       ///
        ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
        /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
        /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
@@ -414,7 +415,7 @@ where
        /// documentation for [`MonitorUpdatingPersister`].
        pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
                &self, broadcaster: &B, fee_estimator: &F,
-       ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+       ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
@@ -448,12 +449,12 @@ where
        ///
        /// The correct `monitor_key` would be:
        /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
-       /// 
+       ///
        /// Loading a large number of monitors will be faster if done in parallel. You can use this
        /// function to accomplish this. Take care to limit the number of parallel readers.
        pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
                &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
-       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
@@ -494,7 +495,7 @@ where
        /// Read a channel monitor.
        fn read_monitor(
                &self, monitor_name: &MonitorName,
-       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
                let outpoint: OutPoint = monitor_name.try_into()?;
                let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -505,7 +506,7 @@ where
                if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
                        monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
                }
-               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
                        &mut monitor_cursor,
                        (&*self.entropy_source, &*self.signer_provider),
                ) {
@@ -594,7 +595,7 @@ where
        }
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref> 
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
        Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
 where
        K::Target: KVStore,
@@ -610,24 +611,6 @@ where
        ) -> chain::ChannelMonitorUpdateStatus {
                // Determine the proper key for this monitor
                let monitor_name = MonitorName::from(funding_txo);
-               let maybe_old_monitor = self.read_monitor(&monitor_name);
-               match maybe_old_monitor {
-                       Ok((_, ref old_monitor)) => {
-                               // Check that this key isn't already storing a monitor with a higher update_id
-                               // (collision)
-                               if old_monitor.get_latest_update_id() > monitor.get_latest_update_id() {
-                                       log_error!(
-                                               self.logger,
-                                               "Tried to write a monitor at the same outpoint {} with a higher update_id!",
-                                               monitor_name.as_str()
-                                       );
-                                       return chain::ChannelMonitorUpdateStatus::UnrecoverableError;
-                               }
-                       }
-                       // This means the channel monitor is new.
-                       Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
-                       _ => return chain::ChannelMonitorUpdateStatus::UnrecoverableError,
-               }
                // Serialize and write the new monitor
                let mut monitor_bytes = Vec::with_capacity(
                        MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
@@ -641,65 +624,12 @@ where
                        &monitor_bytes,
                ) {
                        Ok(_) => {
-                               // Assess cleanup. Typically, we'll clean up only between the last two known full
-                               // monitors.
-                               if let Ok((_, old_monitor)) = maybe_old_monitor {
-                                       let start = old_monitor.get_latest_update_id();
-                                       let end = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
-                                               // We don't want to clean the rest of u64, so just do possible pending
-                                               // updates. Note that we never write updates at
-                                               // `CLOSED_CHANNEL_UPDATE_ID`.
-                                               cmp::min(
-                                                       start.saturating_add(self.maximum_pending_updates),
-                                                       CLOSED_CHANNEL_UPDATE_ID - 1,
-                                               )
-                                       } else {
-                                               monitor.get_latest_update_id().saturating_sub(1)
-                                       };
-                                       // We should bother cleaning up only if there's at least one update
-                                       // expected.
-                                       for update_id in start..=end {
-                                               let update_name = UpdateName::from(update_id);
-                                               #[cfg(debug_assertions)]
-                                               {
-                                                       if let Ok(update) =
-                                                               self.read_monitor_update(&monitor_name, &update_name)
-                                                       {
-                                                               // Assert that we are reading what we think we are.
-                                                               debug_assert_eq!(update.update_id, update_name.0);
-                                                       } else if update_id != start && monitor.get_latest_update_id() != CLOSED_CHANNEL_UPDATE_ID
-                                                       {
-                                                               // We're deleting something we should know doesn't exist.
-                                                               panic!(
-                                                                       "failed to read monitor update {}",
-                                                                       update_name.as_str()
-                                                               );
-                                                       }
-                                                       // On closed channels, we will unavoidably try to read
-                                                       // non-existent updates since we have to guess at the range of
-                                                       // stale updates, so do nothing.
-                                               }
-                                               if let Err(e) = self.kv_store.remove(
-                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-                                                       monitor_name.as_str(),
-                                                       update_name.as_str(),
-                                                       true,
-                                               ) {
-                                                       log_error!(
-                                                               self.logger,
-                                                               "error cleaning up channel monitor updates for monitor {}, reason: {}",
-                                                               monitor_name.as_str(),
-                                                               e
-                                                       );
-                                               };
-                                       }
-                               };
                                chain::ChannelMonitorUpdateStatus::Completed
                        }
                        Err(e) => {
                                log_error!(
                                        self.logger,
-                                       "error writing channel monitor {}/{}/{} reason: {}",
+                                       "Failed to write ChannelMonitor {}/{}/{} reason: {}",
                                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
                                        CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                                        monitor_name.as_str(),
@@ -741,7 +671,7 @@ where
                                        Err(e) => {
                                                log_error!(
                                                        self.logger,
-                                                       "error writing channel monitor update {}/{}/{} reason: {}",
+                                                       "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
                                                        CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                        monitor_name.as_str(),
                                                        update_name.as_str(),
@@ -751,8 +681,41 @@ where
                                        }
                                }
                        } else {
-                               // We could write this update, but it meets criteria of our design that call for a full monitor write.
-                               self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+                               let monitor_name = MonitorName::from(funding_txo);
+                               // In case of channel-close monitor update, we need to read old monitor before persisting
+                               // the new one in order to determine the cleanup range.
+                               let maybe_old_monitor = match monitor.get_latest_update_id() {
+                                       CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
+                                       _ => None
+                               };
+
+                               // We could write this update, but it meets the criteria of our design that call for a full monitor write.
+                               let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
+
+                               if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
+                                       let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
+                                               // If there is an error while reading the old monitor, we skip the cleanup.
+                                               maybe_old_monitor.map(|(_, ref old_monitor)| {
+                                                       let start = old_monitor.get_latest_update_id();
+                                                       // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
+                                                       let end = cmp::min(
+                                                               start.saturating_add(self.maximum_pending_updates),
+                                                               CLOSED_CHANNEL_UPDATE_ID - 1,
+                                                       );
+                                                       (start, end)
+                                               })
+                                       } else {
+                                               let end = monitor.get_latest_update_id();
+                                               let start = end.saturating_sub(self.maximum_pending_updates);
+                                               Some((start, end))
+                                       };
+
+                                       if let Some((start, end)) = cleanup_range {
+                                               self.cleanup_in_range(monitor_name, start, end);
+                                       }
+                               }
+
+                               monitor_update_status
                        }
                } else {
                        // There is no update given, so we must persist a new monitor.
@@ -761,6 +724,34 @@ where
        }
 }
 
+impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
+where
+       ES::Target: EntropySource + Sized,
+       K::Target: KVStore,
+       L::Target: Logger,
+       SP::Target: SignerProvider + Sized
+{
+       // Cleans up monitor updates for the given monitor in the range `start..=end` (inclusive).
+       fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
+               for update_id in start..=end {
+                       let update_name = UpdateName::from(update_id);
+                       if let Err(e) = self.kv_store.remove(
+                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                               monitor_name.as_str(),
+                               update_name.as_str(),
+                               true,
+                       ) {
+                               log_error!(
+                                       self.logger,
+                                       "Failed to clean up channel monitor updates for monitor {}, reason: {}",
+                                       monitor_name.as_str(),
+                                       e
+                               );
+                       };
+               }
+       }
+}
+
 /// A struct representing a name for a monitor.
 #[derive(Debug)]
 struct MonitorName(String);
@@ -896,20 +887,21 @@ mod tests {
        #[test]
        fn persister_with_real_monitors() {
                // This value is used later to limit how many iterations we perform.
-               let test_max_pending_updates = 7;
+               let persister_0_max_pending_updates = 7;
+               // Intentionally set this to a smaller value to test a different alignment.
+               let persister_1_max_pending_updates = 3;
                let chanmon_cfgs = create_chanmon_cfgs(4);
                let persister_0 = MonitorUpdatingPersister {
                        kv_store: &TestStore::new(false),
                        logger: &TestLogger::new(),
-                       maximum_pending_updates: test_max_pending_updates,
+                       maximum_pending_updates: persister_0_max_pending_updates,
                        entropy_source: &chanmon_cfgs[0].keys_manager,
                        signer_provider: &chanmon_cfgs[0].keys_manager,
                };
                let persister_1 = MonitorUpdatingPersister {
                        kv_store: &TestStore::new(false),
                        logger: &TestLogger::new(),
-                       // Intentionally set this to a smaller value to test a different alignment.
-                       maximum_pending_updates: 3,
+                       maximum_pending_updates: persister_1_max_pending_updates,
                        entropy_source: &chanmon_cfgs[1].keys_manager,
                        signer_provider: &chanmon_cfgs[1].keys_manager,
                };
@@ -934,7 +926,6 @@ mod tests {
                node_cfgs[1].chain_monitor = chain_mon_1;
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
                let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
                let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
 
@@ -957,10 +948,11 @@ mod tests {
                                for (_, mon) in persisted_chan_data_0.iter() {
                                        // check that when we read it, we got the right update id
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                                       // if the CM is at the correct update id without updates, ensure no updates are stored
+
+                                       // if the CM is at the consolidation threshold, ensure no updates are stored.
                                        let monitor_name = MonitorName::from(mon.get_funding_txo().0);
-                                       let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
-                                       if cm_0.get_latest_update_id() == $expected_update_id {
+                                       if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
+                                                       || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
                                                assert_eq!(
                                                        persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                                monitor_name.as_str()).unwrap().len(),
@@ -975,8 +967,9 @@ mod tests {
                                for (_, mon) in persisted_chan_data_1.iter() {
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
                                        let monitor_name = MonitorName::from(mon.get_funding_txo().0);
-                                       let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
-                                       if cm_1.get_latest_update_id() == $expected_update_id {
+                                       // if the CM is at the consolidation threshold, ensure no updates are stored.
+                                       if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
+                                                       || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
                                                assert_eq!(
                                                        persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                                monitor_name.as_str()).unwrap().len(),
@@ -1001,7 +994,7 @@ mod tests {
                // Send a few more payments to try all the alignments of max pending updates with
                // updates for a payment sent and received.
                let mut sender = 0;
-               for i in 3..=test_max_pending_updates * 2 {
+               for i in 3..=persister_0_max_pending_updates * 2 {
                        let receiver;
                        if sender == 0 {
                                sender = 1;
index 0d5cfc81906fddacfe24ae97d202fe3f20f7383e..484d603404297d6aaf4e6c59f8f46af2a86fb4dc 100644 (file)
@@ -371,14 +371,14 @@ impl Writeable for BigSize {
        #[inline]
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self.0 {
-                       0...0xFC => {
+                       0..=0xFC => {
                                (self.0 as u8).write(writer)
                        },
-                       0xFD...0xFFFF => {
+                       0xFD..=0xFFFF => {
                                0xFDu8.write(writer)?;
                                (self.0 as u16).write(writer)
                        },
-                       0x10000...0xFFFFFFFF => {
+                       0x10000..=0xFFFFFFFF => {
                                0xFEu8.write(writer)?;
                                (self.0 as u32).write(writer)
                        },
index 782e786ff843eb08f5b5e0146decc9e8b1138af5..84d9f7a180bb301a1c18a136530a04ce75589a9e 100644 (file)
@@ -917,7 +917,7 @@ macro_rules! tlv_stream {
 
                #[cfg_attr(test, derive(PartialEq))]
                #[derive(Debug)]
-               pub(super) struct $nameref<'a> {
+               pub(crate) struct $nameref<'a> {
                        $(
                                pub(super) $field: Option<tlv_record_ref_type!($fieldty)>,
                        )*
index 9c6a1e30224a14b21eff915b3bc3666f96e7b2d5..a2cbf78b70053d2467cbe41fafe041eb30fa68e4 100644 (file)
@@ -11,7 +11,8 @@ use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSH
 use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
 use crate::ln::channel_keys::{HtlcKey};
 use crate::ln::{msgs, PaymentPreimage};
-use crate::sign::{WriteableEcdsaChannelSigner, InMemorySigner, ChannelSigner, EcdsaChannelSigner};
+use crate::sign::{InMemorySigner, ChannelSigner};
+use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
 
 use crate::prelude::*;
 use core::cmp;
@@ -24,12 +25,20 @@ use bitcoin::sighash;
 use bitcoin::sighash::EcdsaSighashType;
 
 use bitcoin::secp256k1;
+#[cfg(taproot)]
+use bitcoin::secp256k1::All;
 use bitcoin::secp256k1::{SecretKey, PublicKey};
 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
+#[cfg(taproot)]
+use musig2::types::{PartialSignature, PublicNonce, SecretNonce};
 use crate::sign::HTLCDescriptor;
 use crate::util::ser::{Writeable, Writer};
 use crate::io::Error;
 use crate::ln::features::ChannelTypeFeatures;
+#[cfg(taproot)]
+use crate::ln::msgs::PartialSignatureWithNonce;
+#[cfg(taproot)]
+use crate::sign::taproot::TaprootChannelSigner;
 
 /// Initial value for revoked commitment downward counter
 pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
@@ -129,7 +138,7 @@ impl ChannelSigner for TestChannelSigner {
                self.inner.release_commitment_secret(idx)
        }
 
-       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
+       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
                let mut state = self.state.lock().unwrap();
                let idx = holder_tx.commitment_number();
                assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
@@ -137,6 +146,16 @@ impl ChannelSigner for TestChannelSigner {
                Ok(())
        }
 
+       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
+               if !*self.available.lock().unwrap() {
+                       return Err(());
+               }
+               let mut state = self.state.lock().unwrap();
+               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
+               state.last_counterparty_revoked_commitment = idx;
+               Ok(())
+       }
+
        fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
 
        fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
@@ -147,7 +166,7 @@ impl ChannelSigner for TestChannelSigner {
 }
 
 impl EcdsaChannelSigner for TestChannelSigner {
-       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec<PaymentPreimage>, outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
                self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
 
                {
@@ -166,17 +185,7 @@ impl EcdsaChannelSigner for TestChannelSigner {
                        state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
                }
 
-               Ok(self.inner.sign_counterparty_commitment(commitment_tx, preimages, secp_ctx).unwrap())
-       }
-
-       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
-               if !*self.available.lock().unwrap() {
-                       return Err(());
-               }
-               let mut state = self.state.lock().unwrap();
-               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
-               state.last_counterparty_revoked_commitment = idx;
-               Ok(())
+               Ok(self.inner.sign_counterparty_commitment(commitment_tx, inbound_htlc_preimages, outbound_htlc_preimages, secp_ctx).unwrap())
        }
 
        fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
@@ -201,11 +210,11 @@ impl EcdsaChannelSigner for TestChannelSigner {
        }
 
        fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_output(justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
+               Ok(EcdsaChannelSigner::sign_justice_revoked_output(&self.inner, justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
        }
 
        fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_htlc(justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
+               Ok(EcdsaChannelSigner::sign_justice_revoked_htlc(&self.inner, justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
        }
 
        fn sign_holder_htlc_transaction(
@@ -241,11 +250,11 @@ impl EcdsaChannelSigner for TestChannelSigner {
                                &hash_to_message!(sighash.as_byte_array()), &htlc_descriptor.counterparty_sig, &countersignatory_htlc_key.to_public_key()
                        ).unwrap();
                }
-               Ok(self.inner.sign_holder_htlc_transaction(htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
+               Ok(EcdsaChannelSigner::sign_holder_htlc_transaction(&self.inner, htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
        }
 
        fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
+               Ok(EcdsaChannelSigner::sign_counterparty_htlc_transaction(&self.inner, htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
        }
 
        fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
@@ -261,7 +270,7 @@ impl EcdsaChannelSigner for TestChannelSigner {
                // As long as our minimum dust limit is enforced and is greater than our anchor output
                // value, an anchor output can only have an index within [0, 1].
                assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
-               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
+               EcdsaChannelSigner::sign_holder_anchor_input(&self.inner, anchor_tx, input, secp_ctx)
        }
 
        fn sign_channel_announcement_with_funding_key(
@@ -273,6 +282,45 @@ impl EcdsaChannelSigner for TestChannelSigner {
 
 impl WriteableEcdsaChannelSigner for TestChannelSigner {}
 
+#[cfg(taproot)]
+impl TaprootChannelSigner for TestChannelSigner {
+       fn generate_local_nonce_pair(&self, commitment_number: u64, secp_ctx: &Secp256k1<All>) -> PublicNonce {
+               todo!()
+       }
+
+       fn partially_sign_counterparty_commitment(&self, counterparty_nonce: PublicNonce, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec<PaymentPreimage>, outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<All>) -> Result<(PartialSignatureWithNonce, Vec<secp256k1::schnorr::Signature>), ()> {
+               todo!()
+       }
+
+       fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+               todo!()
+       }
+
+       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<All>) -> Result<secp256k1::schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>) -> Result<secp256k1::schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<All>) -> Result<secp256k1::schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>) -> Result<secp256k1::schnorr::Signature, ()> {
+               todo!()
+       }
+
+       fn partially_sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+               todo!()
+       }
+
+       fn sign_holder_anchor_input(&self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<All>) -> Result<secp256k1::schnorr::Signature, ()> {
+               todo!()
+       }
+}
+
 impl Writeable for TestChannelSigner {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
                // TestChannelSigner has two fields - `inner` ([`InMemorySigner`]) and `state`
index e9a7f0ebfca856cfb3a88aec9c32fad4e3f50507..a006d37e90e434f92905cd66a06138820ff731b1 100644 (file)
@@ -7,6 +7,8 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
+use crate::blinded_path::BlindedPath;
+use crate::blinded_path::payment::ReceiveTlvs;
 use crate::chain;
 use crate::chain::WatchedOutput;
 use crate::chain::chaininterface;
@@ -17,21 +19,23 @@ use crate::chain::chainmonitor::{MonitorUpdateId, UpdateOrigin};
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::MonitorEvent;
 use crate::chain::transaction::OutPoint;
+use crate::routing::router::{CandidateRouteHop, FirstHopCandidate, PublicHopCandidate, PrivateHopCandidate};
 use crate::sign;
 use crate::events;
 use crate::events::bump_transaction::{WalletSource, Utxo};
 use crate::ln::ChannelId;
-use crate::ln::channelmanager;
+use crate::ln::channelmanager::{ChannelDetails, self};
 use crate::ln::chan_utils::CommitmentTransaction;
 use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use crate::ln::{msgs, wire};
 use crate::ln::msgs::LightningError;
 use crate::ln::script::ShutdownScript;
-use crate::offers::invoice::UnsignedBolt12Invoice;
+use crate::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
 use crate::offers::invoice_request::UnsignedInvoiceRequest;
-use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
+use crate::onion_message::{Destination, MessageRouter, OnionMessagePath};
+use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
 use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
-use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, Router, ScorerAccountingForInFlightHtlcs};
+use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
 use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
 use crate::sync::RwLock;
 use crate::util::config::UserConfig;
@@ -50,7 +54,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::sighash::{SighashCache, EcdsaSighashType};
 
-use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, self};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
 use bitcoin::secp256k1::schnorr;
@@ -70,6 +74,7 @@ use crate::sign::{InMemorySigner, Recipient, EntropySource, NodeSigner, SignerPr
 
 #[cfg(feature = "std")]
 use std::time::{SystemTime, UNIX_EPOCH};
+use bitcoin::psbt::PartiallySignedTransaction;
 use bitcoin::Sequence;
 
 pub fn pubkey(byte: u8) -> PublicKey {
@@ -117,7 +122,7 @@ impl<'a> TestRouter<'a> {
 
 impl<'a> Router for TestRouter<'a> {
        fn find_route(
-               &self, payer: &PublicKey, params: &RouteParameters, first_hops: Option<&[&channelmanager::ChannelDetails]>,
+               &self, payer: &PublicKey, params: &RouteParameters, first_hops: Option<&[&ChannelDetails]>,
                inflight_htlcs: InFlightHtlcs
        ) -> Result<Route, msgs::LightningError> {
                if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
@@ -128,6 +133,7 @@ impl<'a> Router for TestRouter<'a> {
                                let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
                                for path in &route.paths {
                                        let mut aggregate_msat = 0u64;
+                                       let mut prev_hop_node = payer;
                                        for (idx, hop) in path.hops.iter().rev().enumerate() {
                                                aggregate_msat += hop.fee_msat;
                                                let usage = ChannelUsage {
@@ -136,14 +142,44 @@ impl<'a> Router for TestRouter<'a> {
                                                        effective_capacity: EffectiveCapacity::Unknown,
                                                };
 
-                                               // Since the path is reversed, the last element in our iteration is the first
-                                               // hop.
                                                if idx == path.hops.len() - 1 {
-                                                       scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
+                                                       if let Some(first_hops) = first_hops {
+                                                               if let Some(idx) = first_hops.iter().position(|h| h.get_outbound_payment_scid() == Some(hop.short_channel_id)) {
+                                                                       let node_id = NodeId::from_pubkey(payer);
+                                                                       let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
+                                                                               details: first_hops[idx],
+                                                                               payer_node_id: &node_id,
+                                                                       });
+                                                                       scorer.channel_penalty_msat(&candidate, usage, &());
+                                                                       continue;
+                                                               }
+                                                       }
+                                               }
+                                               let network_graph = self.network_graph.read_only();
+                                               if let Some(channel) = network_graph.channel(hop.short_channel_id) {
+                                                       let (directed, _) = channel.as_directed_to(&NodeId::from_pubkey(&hop.pubkey)).unwrap();
+                                                       let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
+                                                               info: directed,
+                                                               short_channel_id: hop.short_channel_id,
+                                                       });
+                                                       scorer.channel_penalty_msat(&candidate, usage, &());
                                                } else {
-                                                       let curr_hop_path_idx = path.hops.len() - 1 - idx;
-                                                       scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
+                                                       let target_node_id = NodeId::from_pubkey(&hop.pubkey);
+                                                       let route_hint = RouteHintHop {
+                                                               src_node_id: *prev_hop_node,
+                                                               short_channel_id: hop.short_channel_id,
+                                                               fees: RoutingFees { base_msat: 0, proportional_millionths: 0 },
+                                                               cltv_expiry_delta: 0,
+                                                               htlc_minimum_msat: None,
+                                                               htlc_maximum_msat: None,
+                                                       };
+                                                       let candidate = CandidateRouteHop::PrivateHop(PrivateHopCandidate {
+                                                               hint: &route_hint,
+                                                               target_node_id: &target_node_id,
+                                                       });
+                                                       scorer.channel_penalty_msat(&candidate, usage, &());
                                                }
+                                               prev_hop_node = &hop.pubkey;
                                        }
                                }
                        }
@@ -156,6 +192,32 @@ impl<'a> Router for TestRouter<'a> {
                        &[42; 32]
                )
        }
+
+       fn create_blinded_payment_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+               _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+               unreachable!()
+       }
+}
+
+impl<'a> MessageRouter for TestRouter<'a> {
+       fn find_path(
+               &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+       ) -> Result<OnionMessagePath, ()> {
+               unreachable!()
+       }
+
+       fn create_blinded_paths<
+               ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+       >(
+               &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+               _secp_ctx: &Secp256k1<T>
+       ) -> Result<Vec<BlindedPath>, ()> {
+               unreachable!()
+       }
 }
 
 impl<'a> Drop for TestRouter<'a> {
@@ -175,13 +237,15 @@ impl EntropySource for OnlyReadsKeysInterface {
        fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }}
 
 impl SignerProvider for OnlyReadsKeysInterface {
-       type Signer = TestChannelSigner;
+       type EcdsaSigner = TestChannelSigner;
+       #[cfg(taproot)]
+       type TaprootSigner = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!(); }
 
-       fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer { unreachable!(); }
+       fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner { unreachable!(); }
 
-       fn read_chan_signer(&self, mut reader: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
+       fn read_chan_signer(&self, mut reader: &[u8]) -> Result<Self::EcdsaSigner, msgs::DecodeError> {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = Arc::new(Mutex::new(EnforcementState::new()));
 
@@ -192,7 +256,7 @@ impl SignerProvider for OnlyReadsKeysInterface {
                ))
        }
 
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()> { Err(()) }
+       fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> { Err(()) }
        fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> { Err(()) }
 }
 
@@ -200,7 +264,7 @@ pub struct TestChainMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
        pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
        pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
-       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<TestChannelSigner>>,
+       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a dyn chainmonitor::Persist<TestChannelSigner>>,
        pub keys_manager: &'a TestKeysInterface,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
        /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
@@ -211,7 +275,7 @@ pub struct TestChainMonitor<'a> {
        pub expect_monitor_round_trip_fail: Mutex<Option<ChannelId>>,
 }
 impl<'a> TestChainMonitor<'a> {
-       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        monitor_updates: Mutex::new(HashMap::new()),
@@ -334,7 +398,7 @@ impl WatchtowerPersister {
        }
 }
 
-impl<Signer: sign::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for WatchtowerPersister {
+impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for WatchtowerPersister {
        fn persist_new_channel(&self, funding_txo: OutPoint,
                data: &channelmonitor::ChannelMonitor<Signer>, id: MonitorUpdateId
        ) -> chain::ChannelMonitorUpdateStatus {
@@ -414,7 +478,7 @@ impl TestPersister {
                self.update_rets.lock().unwrap().push_back(next_ret);
        }
 }
-impl<Signer: sign::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for TestPersister {
+impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for TestPersister {
        fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
                        return update_ret
@@ -928,7 +992,8 @@ impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
 pub struct TestLogger {
        level: Level,
        pub(crate) id: String,
-       pub lines: Mutex<HashMap<(String, String), usize>>,
+       pub lines: Mutex<HashMap<(&'static str, String), usize>>,
+       pub context: Mutex<HashMap<(&'static str, Option<PublicKey>, Option<ChannelId>), usize>>,
 }
 
 impl TestLogger {
@@ -939,13 +1004,14 @@ impl TestLogger {
                TestLogger {
                        level: Level::Trace,
                        id,
-                       lines: Mutex::new(HashMap::new())
+                       lines: Mutex::new(HashMap::new()),
+                       context: Mutex::new(HashMap::new()),
                }
        }
        pub fn enable(&mut self, level: Level) {
                self.level = level;
        }
-       pub fn assert_log(&self, module: String, line: String, count: usize) {
+       pub fn assert_log(&self, module: &str, line: String, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                assert_eq!(log_entries.get(&(module, line)), Some(&count));
        }
@@ -957,7 +1023,7 @@ impl TestLogger {
        pub fn assert_log_contains(&self, module: &str, line: &str, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-                       m == module && l.contains(line)
+                       *m == module && l.contains(line)
                }).map(|(_, c) | { c }).sum();
                assert_eq!(l, count)
        }
@@ -970,15 +1036,24 @@ impl TestLogger {
        pub fn assert_log_regex(&self, module: &str, pattern: regex::Regex, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-                       m == module && pattern.is_match(&l)
+                       *m == module && pattern.is_match(&l)
                }).map(|(_, c) | { c }).sum();
                assert_eq!(l, count)
        }
+
+       pub fn assert_log_context_contains(
+               &self, module: &str, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, count: usize
+       ) {
+               let context_entries = self.context.lock().unwrap();
+               let l = context_entries.get(&(module, peer_id, channel_id)).unwrap();
+               assert_eq!(*l, count)
+       }
 }
 
 impl Logger for TestLogger {
-       fn log(&self, record: &Record) {
-               *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
+       fn log(&self, record: Record) {
+               *self.lines.lock().unwrap().entry((record.module_path, format!("{}", record.args))).or_insert(0) += 1;
+               *self.context.lock().unwrap().entry((record.module_path, record.peer_id, record.channel_id)).or_insert(0) += 1;
                if record.level >= self.level {
                        #[cfg(all(not(ldk_bench), feature = "std"))] {
                                let pfx = format!("{} {} [{}:{}]", self.id, record.level.to_string(), record.module_path, record.line);
@@ -1096,7 +1171,9 @@ impl NodeSigner for TestKeysInterface {
 }
 
 impl SignerProvider for TestKeysInterface {
-       type Signer = TestChannelSigner;
+       type EcdsaSigner = TestChannelSigner;
+       #[cfg(taproot)]
+       type TaprootSigner = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
                self.backing.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id)
@@ -1108,7 +1185,7 @@ impl SignerProvider for TestKeysInterface {
                TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
        }
 
-       fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
+       fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::EcdsaSigner, msgs::DecodeError> {
                let mut reader = io::Cursor::new(buffer);
 
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
@@ -1121,7 +1198,7 @@ impl SignerProvider for TestKeysInterface {
                ))
        }
 
-       fn get_destination_script(&self) -> Result<ScriptBuf, ()> { self.backing.get_destination_script() }
+       fn get_destination_script(&self, channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> { self.backing.get_destination_script(channel_keys_id) }
 
        fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
                match &mut *self.expectations.lock().unwrap() {
@@ -1282,8 +1359,12 @@ impl crate::util::ser::Writeable for TestScorer {
 impl ScoreLookUp for TestScorer {
        type ScoreParams = ();
        fn channel_penalty_msat(
-               &self, short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage, _score_params: &Self::ScoreParams
+               &self, candidate: &CandidateRouteHop, usage: ChannelUsage, _score_params: &Self::ScoreParams
        ) -> u64 {
+               let short_channel_id = match candidate.globally_unique_short_channel_id() {
+                       Some(scid) => scid,
+                       None => return 0,
+               };
                if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() {
                        match scorer_expectations.pop_front() {
                                Some((scid, expectation)) => {
@@ -1298,13 +1379,15 @@ impl ScoreLookUp for TestScorer {
 }
 
 impl ScoreUpdate for TestScorer {
-       fn payment_path_failed(&mut self, _actual_path: &Path, _actual_short_channel_id: u64) {}
+       fn payment_path_failed(&mut self, _actual_path: &Path, _actual_short_channel_id: u64, _duration_since_epoch: Duration) {}
+
+       fn payment_path_successful(&mut self, _actual_path: &Path, _duration_since_epoch: Duration) {}
 
-       fn payment_path_successful(&mut self, _actual_path: &Path) {}
+       fn probe_failed(&mut self, _actual_path: &Path, _: u64, _duration_since_epoch: Duration) {}
 
-       fn probe_failed(&mut self, _actual_path: &Path, _: u64) {}
+       fn probe_successful(&mut self, _actual_path: &Path, _duration_since_epoch: Duration) {}
 
-       fn probe_successful(&mut self, _actual_path: &Path) {}
+       fn time_passed(&mut self, _duration_since_epoch: Duration) {}
 }
 
 impl Drop for TestScorer {
@@ -1366,7 +1449,8 @@ impl WalletSource for TestWalletSource {
                Ok(ScriptBuf::new_p2pkh(&public_key.pubkey_hash()))
        }
 
-       fn sign_tx(&self, mut tx: Transaction) -> Result<Transaction, ()> {
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()> {
+               let mut tx = psbt.extract_tx();
                let utxos = self.utxos.borrow();
                for i in 0..tx.input.len() {
                        if let Some(utxo) = utxos.iter().find(|utxo| utxo.outpoint == tx.input[i].previous_output) {
index d73360749dfaa0d9fb9aed985ef3121e6beaa543..3a4acc675e6be5bbe915e70311d3ee7e7d2c328e 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 name = "msrv-check"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 
 [dependencies]
 lightning = { path = "../lightning" }
@@ -11,3 +11,4 @@ lightning-net-tokio = { path = "../lightning-net-tokio" }
 lightning-persister = { path = "../lightning-persister" }
 lightning-background-processor = { path = "../lightning-background-processor", features = ["futures"] }
 lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
+lightning-custom-message = { path = "../lightning-custom-message" }
index 16d2fc110e2a42252dbc83afb623f6a76bff570d..c9d404c922f86012b87e1b961ca451e60c49e0bb 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 name = "no-std-check"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 
 [features]
 default = ["lightning/no-std", "lightning-invoice/no-std", "lightning-rapid-gossip-sync/no-std"]
diff --git a/pending_changelog/113-channel-ser-compat.txt b/pending_changelog/113-channel-ser-compat.txt
deleted file mode 100644 (file)
index 9bba9fd..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
- * `ChannelManager`s written with LDK 0.0.119 are no longer readable by versions
-   of LDK prior to 0.0.113. Users wishing to downgrade to LDK 0.0.112 or before
-   can read an 0.0.119-serialized `ChannelManager` with a version of LDK from
-   0.0.113 to 0.0.118, re-serialize it, and then downgrade.
diff --git a/pending_changelog/electrum.txt b/pending_changelog/electrum.txt
deleted file mode 100644 (file)
index 5171f5e..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-## API Updates
-
-- The `Confirm::get_relevant_txids()` call now also returns the height under which LDK expects the respective transaction to be confirmed.