fail-fast: false
matrix:
platform: [ ubuntu-latest, windows-latest, macos-latest ]
- toolchain: [ stable, beta ]
- include:
- - toolchain: stable
- platform: ubuntu-latest
- # 1.48.0 is the MSRV for all crates except lightning-transaction-sync and Win/Mac
- - toolchain: 1.48.0
- platform: ubuntu-latest
- # Windows requires 1.49.0 because that's the MSRV for supported Tokio
- - toolchain: 1.49.0
- platform: windows-latest
- # MacOS-latest requires 1.54.0 because that's what's required for linking to work properly
- - toolchain: 1.54.0
- platform: macos-latest
+ toolchain: [ stable, beta, 1.63.0 ] # 1.63.0 is the MSRV for all crates.
runs-on: ${{ matrix.platform }}
steps:
- name: Checkout source code
run: |
rustup target add thumbv7m-none-eabi
sudo apt-get -y install gcc-arm-none-eabi
+ - name: Check for unknown cfg tags
+ run: ci/check-cfg-flags.py
- name: shellcheck the CI script
if: "matrix.platform == 'ubuntu-latest'"
run: |
cargo check --release
cargo check --no-default-features --features=no-std --release
- cargo check --no-default-features --features=futures --release
+ cargo check --no-default-features --features=futures,std --release
cargo doc --release
- name: Run cargo check for Taproot build.
run: |
cargo check --release
cargo check --no-default-features --features=no-std --release
- cargo check --no-default-features --features=futures --release
+ cargo check --no-default-features --features=futures,std --release
cargo doc --release
env:
RUSTFLAGS: '--cfg=taproot'
When refactoring, structure your PR to make it easy to review and don't
hesitate to split it into multiple small, focused PRs.
-The Minimum Supported Rust Version (MSRV) currently is 1.48.0 (enforced by
+The Minimum Supported Rust Version (MSRV) currently is 1.63.0 (enforced by
our GitHub Actions). We support reading serialized LDK objects written by any
version of LDK 0.0.99 and above. We support LDK versions 0.0.113 and above
reading serialized LDK objects written by modern LDK. Any expected issues with
[workspace]
+resolver = "2"
members = [
"lightning",
"lightning-net-tokio",
"lightning-persister",
"lightning-background-processor",
- "lightning-rapid-gossip-sync"
+ "lightning-rapid-gossip-sync",
+ "lightning-custom-message",
]
exclude = [
- "lightning-custom-message",
"lightning-transaction-sync",
"no-std-check",
"msrv-no-dev-deps-check",
name = "lightning-bench"
version = "0.0.1"
authors = ["Matt Corallo"]
-edition = "2018"
+edition = "2021"
[[bench]]
name = "bench"
--- /dev/null
+#!/usr/bin/env python3
+# Rust is fairly relaxed in checking the validity of arguments passed to #[cfg].
+# While it should probably be more strict when checking features, it cannot be
+# strict when checking loose cfg tags, because those can be anything and are
+# simply passed to rustc via unconstrained arguments.
+#
+# Thus, we do it for rustc manually, by scanning all our source and checking
+# that all our cfg tags match a known cfg tag.
+import sys, glob, re
+
+def check_feature(feature):
+ if feature == "std":
+ pass
+ elif feature == "no-std":
+ pass
+ elif feature == "hashbrown":
+ pass
+ elif feature == "backtrace":
+ pass
+ elif feature == "grind_signatures":
+ pass
+ elif feature == "unsafe_revoked_tx_signing":
+ pass
+ elif feature == "futures":
+ pass
+ elif feature == "tokio":
+ pass
+ elif feature == "rest-client":
+ pass
+ elif feature == "rpc-client":
+ pass
+ elif feature == "serde":
+ pass
+ elif feature == "esplora-blocking":
+ pass
+ elif feature == "esplora-async":
+ pass
+ elif feature == "async-interface":
+ pass
+ elif feature == "electrum":
+ pass
+ elif feature == "_test_utils":
+ pass
+ elif feature == "_test_vectors":
+ pass
+ elif feature == "afl":
+ pass
+ elif feature == "honggfuzz":
+ pass
+ elif feature == "libfuzzer_fuzz":
+ pass
+ elif feature == "stdin_fuzz":
+ pass
+ elif feature == "max_level_off":
+ pass
+ elif feature == "max_level_error":
+ pass
+ elif feature == "max_level_warn":
+ pass
+ elif feature == "max_level_info":
+ pass
+ elif feature == "max_level_debug":
+ pass
+ elif feature == "max_level_trace":
+ pass
+ else:
+ print("Bad feature: " + feature)
+ assert False
+
+def check_target_os(os):
+ if os == "windows":
+ pass
+ else:
+ assert False
+
+def check_cfg_tag(cfg):
+ if cfg == "fuzzing":
+ pass
+ elif cfg == "test":
+ pass
+ elif cfg == "debug_assertions":
+ pass
+ elif cfg == "c_bindings":
+ pass
+ elif cfg == "ldk_bench":
+ pass
+ elif cfg == "taproot":
+ pass
+ elif cfg == "require_route_graph_test":
+ pass
+ else:
+ print("Bad cfg tag: " + cfg)
+ assert False
+
+def check_cfg_args(cfg):
+ if cfg.startswith("all(") or cfg.startswith("any(") or cfg.startswith("not("):
+ brackets = 1
+ pos = 4
+ while pos < len(cfg):
+ if cfg[pos] == "(":
+ brackets += 1
+ elif cfg[pos] == ")":
+ brackets -= 1
+ if brackets == 0:
+ check_cfg_args(cfg[4:pos])
+ if pos + 1 != len(cfg):
+ assert cfg[pos + 1] == ","
+ check_cfg_args(cfg[pos + 2:].strip())
+ return
+ pos += 1
+ assert False
+ else:
+ parts = [part.strip() for part in cfg.split(",", 1)]
+ if len(parts) > 1:
+ for part in parts:
+ check_cfg_args(part)
+ elif cfg.startswith("feature") or cfg.startswith("target_os") or cfg.startswith("target_pointer_width"):
+ arg = cfg
+ if cfg.startswith("feature"):
+ arg = arg[7:].strip()
+ elif cfg.startswith("target_os"):
+ arg = arg[9:].strip()
+ else:
+ arg = arg[20:].strip()
+ assert arg.startswith("=")
+ arg = arg[1:].strip()
+ assert arg.startswith("\"")
+ assert arg.endswith("\"")
+ arg = arg[1:len(arg)-1]
+ assert not "\"" in arg
+ if cfg.startswith("feature"):
+ check_feature(arg)
+ elif cfg.startswith("target_os"):
+ check_target_os(arg)
+ else:
+ assert arg == "32" or arg == "64"
+ else:
+ check_cfg_tag(cfg.strip())
+
+cfg_regex = re.compile(r"#\[cfg\((.*)\)\]")
+for path in glob.glob(sys.path[0] + "/../**/*.rs", recursive = True):
+ with open(path, "r") as file:
+ while True:
+ line = file.readline()
+ if not line:
+ break
+ if "#[cfg(" in line:
+ if not line.strip().startswith("//"):
+ cfg_part = cfg_regex.match(line.strip()).group(1)
+ check_cfg_args(cfg_part)
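For reference, here are attributes this checker accepts and rejects, a sketch built purely from the feature and tag lists above (the function names are illustrative):

    // Accepted: `std` is a known feature and `test` a known cfg tag.
    #[cfg(all(feature = "std", not(test)))]
    fn std_only() {}

    // Accepted: `target_pointer_width` may only be "32" or "64".
    #[cfg(target_pointer_width = "64")]
    fn only_on_64_bit() {}

    // Rejected: an unknown feature makes the script print "Bad feature: ..."
    // and exit non-zero, failing the "Check for unknown cfg tags" CI step.
    // #[cfg(feature = "nonexistent")]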
# which we do here.
# Further crates which appear only as dev-dependencies are pinned further down.
function PIN_RELEASE_DEPS {
- # Tokio MSRV on versions 1.17 through 1.26 is rustc 1.49. Above 1.26 MSRV is 1.56.
- [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.1" --verbose
- [[ "$RUSTC_MINOR_VERSION" -gt 48 && "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.25.1" --verbose
-
- # Sadly the log crate is always a dependency of tokio until 1.20, and has no reasonable MSRV guarantees
- [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p log --precise "0.4.18" --verbose
-
- # The serde_json crate switched to Rust edition 2021 starting with v1.0.101, i.e., has MSRV of 1.56
- [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p serde_json --precise "1.0.100" --verbose
-
return 0 # Don't fail the script if our rustc is higher than the last check
}
-PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
-
-# The addr2line v0.20 crate (a dependency of `backtrace` starting with 0.3.68) relies on 1.55+
-[ "$RUSTC_MINOR_VERSION" -lt 55 ] && cargo update -p backtrace --precise "0.3.67" --verbose
-
-# The quote crate switched to Rust edition 2021 starting with v1.0.31, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p quote --precise "1.0.30" --verbose
+# The tests of `lightning-transaction-sync` require `electrs` and `bitcoind`
+# binaries. Here, we download the binaries, validate them, and export their
+# location via `ELECTRS_EXE`/`BITCOIND_EXE` which will be used by the
+# `electrsd`/`bitcoind` crates in our tests.
+function DOWNLOAD_ELECTRS_AND_BITCOIND {
+ ELECTRS_DL_ENDPOINT="https://github.com/RCasatta/electrsd/releases/download/electrs_releases"
+ ELECTRS_VERSION="esplora_a33e97e1a1fc63fa9c20a116bb92579bbf43b254"
+ BITCOIND_DL_ENDPOINT="https://bitcoincore.org/bin/"
+ BITCOIND_VERSION="25.1"
+ if [[ "$HOST_PLATFORM" == *linux* ]]; then
+ ELECTRS_DL_FILE_NAME=electrs_linux_"$ELECTRS_VERSION".zip
+ ELECTRS_DL_HASH="865e26a96e8df77df01d96f2f569dcf9622fc87a8d99a9b8fe30861a4db9ddf1"
+ BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-linux-gnu.tar.gz
+ BITCOIND_DL_HASH="a978c407b497a727f0444156e397b50491ce862d1f906fef9b521415b3611c8b"
+ elif [[ "$HOST_PLATFORM" == *darwin* ]]; then
+ ELECTRS_DL_FILE_NAME=electrs_macos_"$ELECTRS_VERSION".zip
+ ELECTRS_DL_HASH="2d5ff149e8a2482d3658e9b386830dfc40c8fbd7c175ca7cbac58240a9505bcd"
+ BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-apple-darwin.tar.gz
+ BITCOIND_DL_HASH="1acfde0ec3128381b83e3e5f54d1c7907871d324549129592144dd12a821eff1"
+ else
+ echo -e "\n\nUnsupported platform. Exiting..."
+ exit 1
+ fi
+
+ DL_TMP_DIR=$(mktemp -d)
+ trap 'rm -rf -- "$DL_TMP_DIR"' EXIT
+
+ pushd "$DL_TMP_DIR"
+ ELECTRS_DL_URL="$ELECTRS_DL_ENDPOINT"/"$ELECTRS_DL_FILE_NAME"
+ curl -L -o "$ELECTRS_DL_FILE_NAME" "$ELECTRS_DL_URL"
+ echo "$ELECTRS_DL_HASH $ELECTRS_DL_FILE_NAME"|shasum -a 256 -c
+ unzip "$ELECTRS_DL_FILE_NAME"
+ export ELECTRS_EXE="$DL_TMP_DIR"/electrs
+ chmod +x "$ELECTRS_EXE"
+
+ BITCOIND_DL_URL="$BITCOIND_DL_ENDPOINT"/bitcoin-core-"$BITCOIND_VERSION"/"$BITCOIND_DL_FILE_NAME"
+ curl -L -o "$BITCOIND_DL_FILE_NAME" "$BITCOIND_DL_URL"
+ echo "$BITCOIND_DL_HASH $BITCOIND_DL_FILE_NAME"|shasum -a 256 -c
+ tar xzf "$BITCOIND_DL_FILE_NAME"
+ export BITCOIND_EXE="$DL_TMP_DIR"/bitcoin-"$BITCOIND_VERSION"/bin/bitcoind
+ chmod +x "$BITCOIND_EXE"
+ popd
+}
-# The syn crate depends on too-new proc-macro2 starting with v2.0.33, i.e., has MSRV of 1.56
-if [ "$RUSTC_MINOR_VERSION" -lt 56 ]; then
- SYN_2_DEP=$(grep -o '"syn 2.*' Cargo.lock | tr -d '",' | tr ' ' ':')
- cargo update -p "$SYN_2_DEP" --precise "2.0.32" --verbose
-fi
+PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
-# The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
+# Starting with version 1.10.0, the `regex` crate has an MSRV of rustc 1.65.0.
+[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
-# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
-[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
+# The addr2line v0.21 crate (a dependency of `backtrace` starting with 0.3.69) relies on rustc 1.65
+[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p backtrace --precise "0.3.68" --verbose
export RUST_BACKTRACE=1
cargo check --verbose --color always --features rpc-client,rest-client,tokio
popd
-if [[ $RUSTC_MINOR_VERSION -gt 67 && "$HOST_PLATFORM" != *windows* ]]; then
+if [[ "$HOST_PLATFORM" != *windows* ]]; then
echo -e "\n\nBuilding and testing Transaction Sync Clients with features"
pushd lightning-transaction-sync
- cargo test --verbose --color always --features esplora-blocking
- cargo check --verbose --color always --features esplora-blocking
- cargo test --verbose --color always --features esplora-async
- cargo check --verbose --color always --features esplora-async
- cargo test --verbose --color always --features esplora-async-https
- cargo check --verbose --color always --features esplora-async-https
- cargo test --verbose --color always --features electrum
- cargo check --verbose --color always --features electrum
+
+ # reqwest 0.11.21 had a regression that broke its 1.63.0 MSRV
+ [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p reqwest --precise "0.11.20" --verbose
+ # Starting with version 1.10.0, the `regex` crate has an MSRV of rustc 1.65.0.
+ [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
+
+ DOWNLOAD_ELECTRS_AND_BITCOIND
+
+ RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features esplora-blocking
+ RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features esplora-blocking
+ RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features esplora-async
+ RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features esplora-async
+ RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features esplora-async-https
+ RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features esplora-async-https
+ RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features electrum
+ RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features electrum
+
popd
fi
cargo test --verbose --color always --features futures
popd
-if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then
- echo -e "\n\nTest Custom Message Macros"
- pushd lightning-custom-message
- cargo test --verbose --color always
- [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
- popd
-fi
+echo -e "\n\nTest Custom Message Macros"
+pushd lightning-custom-message
+cargo test --verbose --color always
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
+popd
-if [ "$RUSTC_MINOR_VERSION" -gt 51 ]; then # Current `object` MSRV, subject to change
- echo -e "\n\nTest backtrace-debug builds"
- pushd lightning
- cargo test --verbose --color always --features backtrace
- popd
-fi
+echo -e "\n\nTest backtrace-debug builds"
+pushd lightning
+cargo test --verbose --color always --features backtrace
+popd
echo -e "\n\nBuilding with all Log-Limiting features"
pushd lightning
echo -e "\n\nTesting no-std flags in various combinations"
for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
- [ "$RUSTC_MINOR_VERSION" -gt 50 ] && cargo test -p $DIR --verbose --color always --no-default-features --features no-std
+ cargo test -p $DIR --verbose --color always --no-default-features --features no-std
# check if there is a conflict between no-std and the default std feature
- [ "$RUSTC_MINOR_VERSION" -gt 50 ] && cargo test -p $DIR --verbose --color always --features no-std
+ cargo test -p $DIR --verbose --color always --features no-std
done
+
for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
# check if there is a conflict between no-std and the c_bindings cfg
- [ "$RUSTC_MINOR_VERSION" -gt 50 ] && RUSTFLAGS="--cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features --features=no-std
+ RUSTFLAGS="--cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features --features=no-std
done
RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always
echo -e "\n\nTesting no-std build on a downstream no-std crate"
# check no-std compatibility across dependencies
pushd no-std-check
-if [[ $RUSTC_MINOR_VERSION -gt 67 ]]; then
- # lightning-transaction-sync's MSRV is 1.67
- cargo check --verbose --color always --features lightning-transaction-sync
-else
- # The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
- # This is currently only a release dependency via core2, which we intend to work with
- # rust-bitcoin to remove soon.
- [ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
- cargo check --verbose --color always
-fi
+cargo check --verbose --color always --features lightning-transaction-sync
[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
popd
version = "0.0.1"
authors = ["Automatically generated"]
publish = false
-edition = "2018"
+edition = "2021"
# Because the function is unused it gets dropped before we link lightning, so
# we have to duplicate build.rs here. Note that this is only required for
# fuzzing mode.
description = """
Utilities to perform required background tasks for Rust Lightning.
"""
-edition = "2018"
+edition = "2021"
[package.metadata.docs.rs]
all-features = true
//! running properly, and (2) either can or should be run in the background. See docs for
//! [`BackgroundProcessor`] for more details on the nitty-gritty.
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
use lightning::ln::functional_test_utils::*;
use lightning::ln::msgs::{ChannelMessageHandler, Init};
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
- use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
+ use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
use lightning::util::config::UserConfig;
description = """
Utilities to fetch the chain data from a block source and feed them into Rust Lightning.
"""
-edition = "2018"
+edition = "2021"
[package.metadata.docs.rs]
all-features = true
bitcoin = "0.30.2"
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
+tokio = { version = "1.0", features = [ "io-util", "net", "time", "rt" ], optional = true }
serde_json = { version = "1.0", optional = true }
chunked_transfer = { version = "1.4", optional = true }
//! Both features support either blocking I/O using `std::net::TcpStream` or, with feature `tokio`,
//! non-blocking I/O using `tokio::net::TcpStream` from inside a Tokio runtime.
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![deny(unsafe_code)]
keywords = [ "lightning", "bitcoin", "invoice", "BOLT11" ]
readme = "README.md"
repository = "https://github.com/lightningdevkit/rust-lightning/"
-edition = "2018"
+edition = "2021"
[package.metadata.docs.rs]
all-features = true
version = "0.0.1"
authors = ["Automatically generated"]
publish = false
-edition = "2018"
+edition = "2021"
[package.metadata]
cargo-fuzz = true
for (idx, c) in hex.as_bytes().iter().filter(|&&c| c != b'\n').enumerate() {
b <<= 4;
match *c {
- b'A'...b'F' => b |= c - b'A' + 10,
- b'a'...b'f' => b |= c - b'a' + 10,
- b'0'...b'9' => b |= c - b'0',
+ b'A'..=b'F' => b |= c - b'A' + 10,
+ b'a'..=b'f' => b |= c - b'a' + 10,
+ b'0'..=b'9' => b |= c - b'0',
_ => panic!("Bad hex"),
}
if (idx & 1) == 1 {
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![deny(non_upper_case_globals)]
Implementation of the rust-lightning network stack using Tokio.
For Rust-Lightning clients which wish to make direct connections to Lightning P2P nodes, this is a simple alternative to implementing the required network stack, especially for those already using Tokio.
"""
-edition = "2018"
+edition = "2021"
[package.metadata.docs.rs]
all-features = true
//!
//! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
description = """
Utilities for LDK data persistence and retrieval.
"""
-edition = "2018"
+edition = "2021"
[package.metadata.docs.rs]
all-features = true
//! Provides utilities for LDK data persistence and retrieval.
-//
-// TODO: Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
authors = ["Arik Sosman <git@arik.io>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
-edition = "2018"
+edition = "2021"
description = """
Utility to process gossip routing data from Rapid Gossip Sync Server.
"""
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![deny(unsafe_code)]
description = """
Utilities for syncing LDK via the transaction-based `Confirm` interface.
"""
-edition = "2018"
+edition = "2021"
[package.metadata.docs.rs]
all-features = true
async-interface = []
[dependencies]
-lightning = { version = "0.0.118", path = "../lightning", default-features = false }
+lightning = { version = "0.0.118", path = "../lightning", default-features = false, features = ["std"] }
bitcoin = { version = "0.30.2", default-features = false }
bdk-macros = "0.6"
futures = { version = "0.3", optional = true }
electrum-client = { version = "0.18.0", optional = true }
[dev-dependencies]
-lightning = { version = "0.0.118", path = "../lightning", features = ["std", "_test_utils"] }
-electrsd = { version = "0.26.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
+lightning = { version = "0.0.118", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
tokio = { version = "1.14.0", features = ["full"] }
+
+[target.'cfg(not(no_download))'.dev-dependencies]
+electrsd = { version = "0.26.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
+
+[target.'cfg(no_download)'.dev-dependencies]
+electrsd = { version = "0.26.0", default-features = false, features = ["legacy"] }
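The `cfg(no_download)` split above lets the CI script (see `RUSTFLAGS="--cfg no_download"` earlier) select an `electrsd` build without its auto-download features, so the tests instead use the binaries fetched by `DOWNLOAD_ELECTRS_AND_BITCOIND` and exported via `ELECTRS_EXE`/`BITCOIND_EXE`; without the cfg, `electrsd` keeps its usual download behavior.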
//! [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor
//! [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
#![deny(missing_docs)]
#![deny(unsafe_code)]
Does most of the hard work, without implying a specific runtime, requiring clients implement basic network logic, chain interactions and disk storage.
Still missing tons of error-handling. See GitHub issues for suggested projects if you want to contribute. Don't have to bother telling you not to use this for anything serious, because you'd have to build a client around it to even try.
"""
-edition = "2018"
+edition = "2021"
[package.metadata.docs.rs]
features = ["std"]
/// be sure to manage both cases correctly.
///
/// Bitcoin transaction packages are defined in BIP 331 and here:
- /// https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md
+ /// <https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md>
fn broadcast_transactions(&self, txs: &[&Transaction]);
}
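Implementors now receive all dependent transactions in one call. A hedged sketch of an implementation that submits them together; `RpcClient` and its `submit_package` method are hypothetical stand-ins for a wallet's RPC layer, not APIs from this diff:

    use bitcoin::Transaction;
    use bitcoin::consensus::encode::serialize;
    use lightning::chain::chaininterface::BroadcasterInterface;

    struct PackageBroadcaster { client: RpcClient } // hypothetical RPC wrapper

    impl BroadcasterInterface for PackageBroadcaster {
        fn broadcast_transactions(&self, txs: &[&Transaction]) {
            // Serialize the batch and submit it as one package, so a low-fee
            // commitment transaction is carried by its fee-bumping child.
            let raw: Vec<Vec<u8>> = txs.iter().map(|tx| serialize(*tx)).collect();
            if let Err(e) = self.client.submit_package(&raw) {
                // The trait returns nothing, so failures can only be logged.
                eprintln!("package broadcast failed: {:?}", e);
            }
        }
    }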
outpoint: OutPoint { txid, index: idx as u16 },
script_pubkey: output.script_pubkey,
};
- chain_source.register_output(output)
+ log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint);
+ chain_source.register_output(output);
}
}
}
},
}
if let Some(ref chain_source) = self.chain_source {
- monitor.load_outputs_to_watch(chain_source);
+ monitor.load_outputs_to_watch(chain_source, &self.logger);
}
entry.insert(MonitorHolder {
monitor,
/// Loads the funding txo and outputs to watch into the given `chain::Filter` by repeatedly
/// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs
/// have been registered.
- pub fn load_outputs_to_watch<F: Deref>(&self, filter: &F) where F::Target: chain::Filter {
+ pub fn load_outputs_to_watch<F: Deref, L: Deref>(&self, filter: &F, logger: &L)
+ where
+ F::Target: chain::Filter, L::Target: Logger,
+ {
let lock = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*lock);
+ log_trace!(&logger, "Registering funding outpoint {}", &lock.get_funding_txo().0);
filter.register_tx(&lock.get_funding_txo().0.txid, &lock.get_funding_txo().1);
for (txid, outputs) in lock.get_outputs_to_watch().iter() {
for (index, script_pubkey) in outputs.iter() {
assert!(*index <= u16::max_value() as u32);
+ let outpoint = OutPoint { txid: *txid, index: *index as u16 };
+ log_trace!(logger, "Registering outpoint {} with the filter for monitoring spends", outpoint);
filter.register_output(WatchedOutput {
block_hash: None,
- outpoint: OutPoint { txid: *txid, index: *index as u16 },
+ outpoint,
script_pubkey: script_pubkey.clone(),
});
}
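As a usage sketch (not part of this diff), a trivial `chain::Filter` that this registration loop can drive, simply recording everything it is told to watch:

    use std::sync::Mutex;
    use bitcoin::{Script, Txid};
    use lightning::chain::{self, WatchedOutput};

    struct RecordingFilter {
        txs: Mutex<Vec<Txid>>,
        outputs: Mutex<Vec<WatchedOutput>>,
    }

    impl chain::Filter for RecordingFilter {
        fn register_tx(&self, txid: &Txid, _script_pubkey: &Script) {
            self.txs.lock().unwrap().push(*txid);
        }
        fn register_output(&self, output: WatchedOutput) {
            self.outputs.lock().unwrap().push(output);
        }
    }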
// broadcastable commitment transaction has the HTLC in it, but it
// cannot currently change after channel initialization, so we don't
// need to here.
- let confirmed_htlcs_iter: &mut Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
+ let confirmed_htlcs_iter: &mut dyn Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
let mut matched_htlc = false;
for (ref broadcast_htlc, ref broadcast_source) in confirmed_htlcs_iter {
}
}
- fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &WithChannelMonitor<L>)
- where B::Target: BroadcasterInterface,
- L::Target: Logger,
- {
- let commit_txs = self.get_latest_holder_commitment_txn(logger);
- let mut txs = vec![];
- for tx in commit_txs.iter() {
- log_info!(logger, "Broadcasting local {}", log_tx!(tx));
- txs.push(tx);
- }
- broadcaster.broadcast_transactions(&txs);
+ fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+ let funding_outp = HolderFundingOutput::build(
+ self.funding_redeemscript.clone(),
+ self.channel_value_satoshis,
+ self.onchain_tx_handler.channel_type_features().clone()
+ );
+ let commitment_package = PackageTemplate::build_package(
+ self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
+ PackageSolvingData::HolderFundingOutput(funding_outp),
+ self.best_block.height(), self.best_block.height()
+ );
+ let mut claimable_outpoints = vec![commitment_package];
self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+ // Although we aren't signing the transaction directly here, the transaction will be signed
+ // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
+ // new channel updates.
+ self.holder_tx_signed = true;
+ let mut watch_outputs = Vec::new();
+ // We can't broadcast our HTLC transactions while the commitment transaction is
+ // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
+ // `transactions_confirmed`.
+ if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
+ // Because we're broadcasting a commitment transaction, we should construct the package
+ // assuming it gets confirmed in the next block. Sadly, we have code which considers
+ // "not yet confirmed" things as discardable, so we cannot do that here.
+ let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
+ &self.current_holder_commitment_tx, self.best_block.height()
+ );
+ let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
+ let new_outputs = self.get_broadcasted_holder_watch_outputs(
+ &self.current_holder_commitment_tx, &unsigned_commitment_tx
+ );
+ if !new_outputs.is_empty() {
+ watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
+ }
+ claimable_outpoints.append(&mut new_outpoints);
+ }
+ (claimable_outpoints, watch_outputs)
+ }
+
+ pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
+ &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>
+ )
+ where
+ B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+ self.onchain_tx_handler.update_claims_view_from_requests(
+ claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
+ fee_estimator, logger
+ );
}
fn update_monitor<B: Deref, F: Deref, L: Deref>(
log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
continue;
}
- self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
- // If the channel supports anchor outputs, we'll need to emit an external
- // event to be consumed such that a child transaction is broadcast with a
- // high enough feerate for the parent commitment transaction to confirm.
- if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- let funding_output = HolderFundingOutput::build(
- self.funding_redeemscript.clone(), self.channel_value_satoshis,
- self.onchain_tx_handler.channel_type_features().clone(),
- );
- let best_block_height = self.best_block.height();
- let commitment_package = PackageTemplate::build_package(
- self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
- PackageSolvingData::HolderFundingOutput(funding_output),
- best_block_height, best_block_height
- );
- self.onchain_tx_handler.update_claims_view_from_requests(
- vec![commitment_package], best_block_height, best_block_height,
- broadcaster, &bounded_fee_estimator, logger,
- );
- }
+ self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
}
}
+ /// Cancels any existing pending claims for a commitment that previously confirmed and has now
+ /// been replaced by another.
+ pub fn cancel_prev_commitment_claims<L: Deref>(
+ &mut self, logger: &L, confirmed_commitment_txid: &Txid
+ ) where L::Target: Logger {
+ for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain {
+ // Cancel any pending claims for counterparty commitments we've seen confirm.
+ if counterparty_commitment_txid == confirmed_commitment_txid {
+ continue;
+ }
+ for (htlc, _) in self.counterparty_claimable_outpoints.get(counterparty_commitment_txid).unwrap_or(&vec![]) {
+ log_trace!(logger, "Canceling claims for previously confirmed counterparty commitment {}",
+ counterparty_commitment_txid);
+ let mut outpoint = BitcoinOutPoint { txid: *counterparty_commitment_txid, vout: 0 };
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
+ if self.holder_tx_signed {
+ // If we've signed, we may have broadcast either commitment (prev or current), and
+ // attempted to claim from it immediately without waiting for a confirmation.
+ if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid {
+ log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+ self.current_holder_commitment_tx.txid);
+ let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 };
+ for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs {
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
+ if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx {
+ if prev_holder_commitment_tx.txid != *confirmed_commitment_txid {
+ log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+ prev_holder_commitment_tx.txid);
+ let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 };
+ for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs {
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
+ }
+ }
+ }
+
fn get_latest_holder_commitment_txn<L: Deref>(
&mut self, logger: &WithChannelMonitor<L>,
) -> Vec<Transaction> where L::Target: Logger {
if height > self.best_block.height() {
self.best_block = BestBlock::new(block_hash, height);
+ log_trace!(logger, "Connecting new block {} at height {}", block_hash, height);
self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
} else if block_hash != self.best_block.block_hash() {
self.best_block = BestBlock::new(block_hash, height);
+ log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height);
self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
self.onchain_tx_handler.block_disconnected(height + 1, broadcaster, fee_estimator, logger);
Vec::new()
let mut claimable_outpoints = Vec::new();
'tx_iter: for tx in &txn_matched {
let txid = tx.txid();
+ log_trace!(logger, "Transaction {} confirmed in block {}", txid , block_hash);
// If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
if Some(txid) == self.funding_spend_confirmed {
log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
commitment_tx_to_counterparty_output,
},
});
+ // Now that we've detected a confirmed commitment transaction, attempt to cancel
+ // pending claims for any commitments that were previously confirmed such that
+ // we don't continue claiming inputs that no longer exist.
+ self.cancel_prev_commitment_claims(&logger, &txid);
}
}
if tx.input.len() >= 1 {
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
- let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
- let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
- claimable_outpoints.push(commitment_package);
- self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
- // Although we aren't signing the transaction directly here, the transaction will be signed
- // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
- // new channel updates.
- self.holder_tx_signed = true;
- // We can't broadcast our HTLC transactions while the commitment transaction is
- // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
- // `transactions_confirmed`.
- if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- // Because we're broadcasting a commitment transaction, we should construct the package
- // assuming it gets confirmed in the next block. Sadly, we have code which considers
- // "not yet confirmed" things as discardable, so we cannot do that here.
- let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
- let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
- let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
- if !new_outputs.is_empty() {
- watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
- }
- claimable_outpoints.append(&mut new_outpoints);
- }
+ let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+ claimable_outpoints.append(&mut new_outpoints);
+ watch_outputs.append(&mut new_outputs);
}
// Find which on-chain events have reached their confirmation threshold.
pub script_pubkey: ScriptBuf,
}
-impl<T: Listen> Listen for core::ops::Deref<Target = T> {
+impl<T: Listen> Listen for dyn core::ops::Deref<Target = T> {
fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
(**self).filtered_block_connected(header, txdata, height);
}
None
}
+ pub fn abandon_claim(&mut self, outpoint: &BitcoinOutPoint) {
+ let claim_id = self.claimable_outpoints.get(outpoint).map(|(claim_id, _)| *claim_id)
+ .or_else(|| {
+ self.pending_claim_requests.iter()
+ .find(|(_, claim)| claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint))
+ .map(|(claim_id, _)| *claim_id)
+ });
+ if let Some(claim_id) = claim_id {
+ if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
+ for outpoint in claim.outpoints() {
+ self.claimable_outpoints.remove(outpoint);
+ }
+ }
+ } else {
+ self.locktimed_packages.values_mut().for_each(|claims|
+ claims.retain(|claim| !claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint)));
+ }
+ }
+
/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
/// Together with `update_claims_view_from_matched_txn` this used to be named
use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::consensus::Encodable;
+use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::secp256k1;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::ecdsa::Signature;
) -> Result<CoinSelection, ()>;
/// Signs and provides the full witness for all inputs within the transaction known to the
/// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]).
- fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()>;
+ ///
+ /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the
+ /// unsigned transaction and then sign it with your wallet.
+ fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()>;
}
/// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to
/// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within
/// the transaction known to the wallet (i.e., any provided via
/// [`WalletSource::list_confirmed_utxos`]).
- fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()>;
+ ///
+ /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the
+ /// unsigned transaction and then sign it with your wallet.
+ fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()>;
}
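For a wallet without native PSBT support, the new documentation suggests extracting the unsigned transaction; a minimal sketch of such a `WalletSource` method, assuming a hypothetical `MyWallet` type with a `sign_raw_transaction` helper:

    impl WalletSource for MyWallet {
        // ...list_confirmed_utxos, get_change_script, etc. elided...
        fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()> {
            // The PSBT carries `witness_utxo` entries for the inputs we were
            // handed, but a wallet that cannot consume PSBTs can pull out the
            // unsigned transaction and sign it with its own machinery.
            let unsigned_tx = psbt.extract_tx();
            self.sign_raw_transaction(unsigned_tx).map_err(|_| ())
        }
    }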
/// A wrapper over [`WalletSource`] that implements [`CoinSelection`] by preferring UTXOs that would
.or_else(|_| do_coin_selection(true, true))
}
- fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()> {
- self.source.sign_tx(tx)
+ fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()> {
+ self.source.sign_psbt(psbt)
}
}
}
/// Updates a transaction with the result of a successful coin selection attempt.
- fn process_coin_selection(&self, tx: &mut Transaction, mut coin_selection: CoinSelection) {
- for utxo in coin_selection.confirmed_utxos.drain(..) {
+ fn process_coin_selection(&self, tx: &mut Transaction, coin_selection: &CoinSelection) {
+ for utxo in coin_selection.confirmed_utxos.iter() {
tx.input.push(TxIn {
previous_output: utxo.outpoint,
script_sig: ScriptBuf::new(),
witness: Witness::new(),
});
}
- if let Some(change_output) = coin_selection.change_output.take() {
+ if let Some(change_output) = coin_selection.change_output.clone() {
tx.output.push(change_output);
} else if tx.output.is_empty() {
// We weren't provided a change output, likely because the input set was a perfect
log_debug!(self.logger, "Performing coin selection for commitment package (commitment and anchor transaction) targeting {} sat/kW",
package_target_feerate_sat_per_1000_weight);
- let coin_selection = self.utxo_source.select_confirmed_utxos(
+ let coin_selection: CoinSelection = self.utxo_source.select_confirmed_utxos(
claim_id, must_spend, &[], package_target_feerate_sat_per_1000_weight,
)?;
let total_input_amount = must_spend_amount +
coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value).sum::<u64>();
- self.process_coin_selection(&mut anchor_tx, coin_selection);
+ self.process_coin_selection(&mut anchor_tx, &coin_selection);
let anchor_txid = anchor_tx.txid();
- debug_assert_eq!(anchor_tx.output.len(), 1);
+ // construct psbt
+ let mut anchor_psbt = PartiallySignedTransaction::from_unsigned_tx(anchor_tx).unwrap();
+ // add witness_utxo to anchor input
+ anchor_psbt.inputs[0].witness_utxo = Some(anchor_descriptor.previous_utxo());
+ // add witness_utxo to remaining inputs
+ for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() {
+ // add 1 to skip the anchor input
+ let index = idx + 1;
+ debug_assert_eq!(anchor_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint);
+ if utxo.output.script_pubkey.is_witness_program() {
+ anchor_psbt.inputs[index].witness_utxo = Some(utxo.output);
+ }
+ }
+
+ debug_assert_eq!(anchor_psbt.unsigned_tx.output.len(), 1);
#[cfg(debug_assertions)]
- let unsigned_tx_weight = anchor_tx.weight().to_wu() - (anchor_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
+ let unsigned_tx_weight = anchor_psbt.unsigned_tx.weight().to_wu() - (anchor_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
log_debug!(self.logger, "Signing anchor transaction {}", anchor_txid);
- anchor_tx = self.utxo_source.sign_tx(anchor_tx)?;
+ anchor_tx = self.utxo_source.sign_psbt(anchor_psbt)?;
let signer = anchor_descriptor.derive_channel_signer(&self.signer_provider);
let anchor_sig = signer.sign_holder_anchor_input(&anchor_tx, 0, &self.secp)?;
#[cfg(debug_assertions)]
let must_spend_amount = must_spend.iter().map(|input| input.previous_utxo.value).sum::<u64>();
- let coin_selection = self.utxo_source.select_confirmed_utxos(
+ let coin_selection: CoinSelection = self.utxo_source.select_confirmed_utxos(
claim_id, must_spend, &htlc_tx.output, target_feerate_sat_per_1000_weight,
)?;
let total_input_amount = must_spend_amount +
coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value).sum::<u64>();
- self.process_coin_selection(&mut htlc_tx, coin_selection);
+ self.process_coin_selection(&mut htlc_tx, &coin_selection);
+
+ // construct psbt
+ let mut htlc_psbt = PartiallySignedTransaction::from_unsigned_tx(htlc_tx).unwrap();
+ // add witness_utxo to htlc inputs
+ for (i, htlc_descriptor) in htlc_descriptors.iter().enumerate() {
+ debug_assert_eq!(htlc_psbt.unsigned_tx.input[i].previous_output, htlc_descriptor.outpoint());
+ htlc_psbt.inputs[i].witness_utxo = Some(htlc_descriptor.previous_utxo(&self.secp));
+ }
+ // add witness_utxo to remaining inputs
+ for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() {
+ // offset to skip the htlc inputs
+ let index = idx + htlc_descriptors.len();
+ debug_assert_eq!(htlc_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint);
+ if utxo.output.script_pubkey.is_witness_program() {
+ htlc_psbt.inputs[index].witness_utxo = Some(utxo.output);
+ }
+ }
#[cfg(debug_assertions)]
- let unsigned_tx_weight = htlc_tx.weight().to_wu() - (htlc_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
+ let unsigned_tx_weight = htlc_psbt.unsigned_tx.weight().to_wu() - (htlc_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
- log_debug!(self.logger, "Signing HTLC transaction {}", htlc_tx.txid());
- htlc_tx = self.utxo_source.sign_tx(htlc_tx)?;
+ log_debug!(self.logger, "Signing HTLC transaction {}", htlc_psbt.unsigned_tx.txid());
+ htlc_tx = self.utxo_source.sign_psbt(htlc_psbt)?;
let mut signers = BTreeMap::new();
for (idx, htlc_descriptor) in htlc_descriptors.iter().enumerate() {
#![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), deny(missing_docs))]
#![cfg_attr(not(any(test, feature = "_test_utils")), forbid(unsafe_code))]
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
// In general, rust is absolutely horrid at supporting users doing things like,
// for example, compiling Rust code for real environments. Disable useless lints
// larger. If we don't know that time has moved forward, we can just set it to the last
// time we saw and it will be ignored.
let best_time = self.context.update_time_counter;
- match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
+ match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
}
- #[cfg(feature = "_test_vectors")]
+ #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
#[test]
fn outbound_commitment_test() {
use bitcoin::sighash;
// Test vectors from BOLT 3 Appendices C and F (anchors):
let feeest = TestFeeEstimator{fee_est: 15000};
- let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+ let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
let secp_ctx = Secp256k1::new();
let mut signer = InMemorySigner::new(
// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
// LATENCY_GRACE_PERIOD_BLOCKS.
-#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
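These constants still act as compile-time guards without the lint: unsigned underflow during const evaluation is a hard error in modern rustc, which is why the deprecated `#[deny(const_err)]` can simply be dropped. A worked instance with assumed, illustrative values (not quoted from the file):

    const MIN_CLTV_EXPIRY_DELTA: u32 = 72;
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
    const CLTV_CLAIM_BUFFER: u32 = 6;
    const ANTI_REORG_DELAY: u32 = 6;
    // 72 - 3 - 6 - 6 - 3 = 54: no underflow, so the build succeeds. Shrink
    // MIN_CLTV_EXPIRY_DELTA far enough and rustc rejects the build outright.
    const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA
        - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY
        - LATENCY_GRACE_PERIOD_BLOCKS;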
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
- let chan_id = monitor.get_funding_txo().0.to_channel_id();
if counterparty_opt.is_none() {
let logger = WithChannelMonitor::from(&args.logger, monitor);
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
+ mine_transaction(&nodes[1], &node_txn[0]);
+ if nodes[1].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[1].tx_broadcaster.txn_broadcast();
+ }
+
mine_transaction(&nodes[0], &node_txn[0]);
check_added_monitors!(nodes[0], 1);
test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
// One pending HTLC is discarded by the force-close:
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
// connect_style.
return;
}
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
route_payment(&nodes[0], &[&nodes[1]], 10000000);
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(node_txn.len(), 3);
- assert_eq!(node_txn[0].txid(), node_txn[1].txid());
+ let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(node_txn.len(), 2);
+ check_spends!(node_txn[0], funding_tx);
+ check_spends!(node_txn[1], node_txn[0]);
- let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
+ let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
connect_block(&nodes[1], &block);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
- let tx = {
+ let commitment_tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// have a use for it unless nodes[2] learns the preimage somehow, the funds will go
node_txn.remove(0)
};
- mine_transaction(&nodes[1], &tx);
+ mine_transaction(&nodes[1], &commitment_tx);
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], true);
get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
}
- mine_transaction(&nodes[2], &tx);
- let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(node_txn.len(), 1);
- assert_eq!(node_txn[0].input.len(), 1);
- assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
- assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
- assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+ mine_transaction(&nodes[2], &commitment_tx);
+ let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
+ assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+ let htlc_tx = node_txn.pop().unwrap();
+ assert_eq!(htlc_tx.input.len(), 1);
+ assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
+ assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
+ assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
- check_spends!(node_txn[0], tx);
+ check_spends!(htlc_tx, commitment_tx);
}
#[test]
watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
// Watchtower Alice should have broadcast a commitment/HTLC-timeout
- let alice_state = {
+ {
let mut txn = alice_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
- txn.remove(0)
+ check_spends!(txn[0], chan_1.3);
+ check_spends!(txn[1], txn[0]);
};
// Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
check_added_monitors(&nodes[0], 1);
{
let htlc_txn = alice_broadcaster.txn_broadcast();
- assert_eq!(htlc_txn.len(), 2);
+ assert_eq!(htlc_txn.len(), 1);
check_spends!(htlc_txn[0], bob_state_y);
- // Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
- // it. However, she should, because it now has an invalid parent.
- check_spends!(htlc_txn[1], alice_state);
}
}
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
} else {
- assert_eq!(bob_txn.len(), 2);
+ if nodes[1].connect_style.borrow().updates_best_block_first() {
+ assert_eq!(bob_txn.len(), 3);
+ assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
+ } else {
+ assert_eq!(bob_txn.len(), 2);
+ }
check_spends!(bob_txn[0], chan_ab.3);
}
}
// If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
// Bob force-closed and broadcasts the commitment transaction along with a
// HTLC-output-claiming transaction.
- let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
if broadcast_alice {
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
} else {
- assert_eq!(bob_txn.len(), 2);
- check_spends!(bob_txn[1], txn_to_broadcast[0]);
- assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
+ assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
+ let htlc_tx = bob_txn.pop().unwrap();
+ check_spends!(htlc_tx, txn_to_broadcast[0]);
+ assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
}
}
}
// We should broadcast an HTLC transaction spending our funding transaction first
let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(spending_txn.len(), 2);
- assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
- check_spends!(spending_txn[1], node_txn[0]);
+ let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
+ &spending_txn[1]
+ } else {
+ &spending_txn[0]
+ };
+ check_spends!(htlc_tx, node_txn[0]);
// We should also generate a SpendableOutputs event with the to_self output (as its
// timelock is up).
let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
- &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
+ &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);
commitment_tx
};
let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &commitment_tx));
- if anchors && nodes[0].connect_style.borrow().updates_best_block_first() {
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
assert_eq!(txn[0].txid(), commitment_tx.txid());
};
mine_transaction(&nodes[0], &commitment_tx);
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ let txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ assert_eq!(txn[0].txid(), commitment_tx.txid());
+ }
// Connect blocks until the HTLC's expiration is met, expecting a transaction broadcast.
connect_blocks(&nodes[0], TEST_FINAL_CLTV);
nodes[1].node.timer_tick_occurred();
check_added_monitors(&nodes[1], 2);
check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager, [nodes[0].node.get_our_node_id(); 2], 1000000);
- let (revoked_commitment_a, revoked_commitment_b) = {
- let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
- assert_eq!(txn.len(), 2);
- assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
- assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
- if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
- check_spends!(&txn[0], &chan_a.3);
- check_spends!(&txn[1], &chan_b.3);
- (txn[0].clone(), txn[1].clone())
- } else {
- check_spends!(&txn[1], &chan_a.3);
- check_spends!(&txn[0], &chan_b.3);
- (txn[1].clone(), txn[0].clone())
- }
- };
// Bob should now receive two events to bump his revoked commitment transaction fees.
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
+ let mut revoked_commitment_txs = Vec::with_capacity(events.len());
let mut anchor_txs = Vec::with_capacity(events.len());
for (idx, event) in events.into_iter().enumerate() {
let utxo_value = Amount::ONE_BTC.to_sat() * (idx + 1) as u64;
};
let txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
+ assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+ if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+ check_spends!(&txn[0], &chan_a.3);
+ } else {
+ check_spends!(&txn[0], &chan_b.3);
+ }
let (commitment_tx, anchor_tx) = (&txn[0], &txn[1]);
check_spends!(anchor_tx, coinbase_tx, commitment_tx);
+
+ revoked_commitment_txs.push(commitment_tx.clone());
anchor_txs.push(anchor_tx.clone());
};
for node in &nodes {
- mine_transactions(node, &[&revoked_commitment_a, &anchor_txs[0], &revoked_commitment_b, &anchor_txs[1]]);
+ mine_transactions(node, &[&revoked_commitment_txs[0], &anchor_txs[0], &revoked_commitment_txs[1], &anchor_txs[1]]);
}
check_added_monitors!(&nodes[0], 2);
check_closed_broadcast(&nodes[0], 2, true);
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 4);
- let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
+ let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_txs[0].txid() {
(if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] }, if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] })
} else {
(if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] }, if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] })
assert_eq!(revoked_htlc_claim_a.input.len(), 2); // Spends both HTLC outputs
assert_eq!(revoked_htlc_claim_a.output.len(), 1);
- check_spends!(revoked_htlc_claim_a, revoked_commitment_a);
+ check_spends!(revoked_htlc_claim_a, revoked_commitment_txs[0]);
assert_eq!(revoked_htlc_claim_b.input.len(), 2); // Spends both HTLC outputs
assert_eq!(revoked_htlc_claim_b.output.len(), 1);
- check_spends!(revoked_htlc_claim_b, revoked_commitment_b);
+ check_spends!(revoked_htlc_claim_b, revoked_commitment_txs[1]);
}
// Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
sig
};
htlc_tx.input[0].witness = Witness::from_slice(&[fee_utxo_sig, public_key.to_bytes()]);
- check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
+ check_spends!(htlc_tx, coinbase_tx, revoked_commitment_txs[0], revoked_commitment_txs[1]);
htlc_tx
};
).unwrap();
if let SpendableOutputDescriptor::StaticPaymentOutput(_) = &outputs[0] {
- check_spends!(spend_tx, &revoked_commitment_a, &revoked_commitment_b);
+ check_spends!(spend_tx, &revoked_commitment_txs[0], &revoked_commitment_txs[1]);
} else {
check_spends!(spend_tx, revoked_claim_transactions.get(&spend_tx.input[0].previous_output.txid).unwrap());
}
// If we update the best block to the new height before providing the confirmed transactions,
// we'll see another broadcast of the commitment transaction.
- if anchors && !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
+ if !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
let _ = nodes[0].tx_broadcaster.txn_broadcast();
}
let htlc_timeout_tx = {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
- let tx = if txn[0].input[0].previous_output.txid == commitment_tx.txid() {
- txn[0].clone()
- } else {
- txn[1].clone()
- };
+ let tx = txn.pop().unwrap();
check_spends!(tx, commitment_tx, coinbase_tx);
tx
};
use core::ops::Deref;
/// Invalid inbound onion payment.
+#[derive(Debug)]
pub struct InboundOnionErr {
/// BOLT 4 error code.
pub err_code: u16,
mod tests {
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::secp256k1::{PublicKey, SecretKey};
+ use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::ChannelId;
use crate::ln::channelmanager::RecipientOnionFields;
use crate::routing::router::{Path, RouteHop};
use crate::util::test_utils;
+ #[test]
+ fn fail_construct_onion_on_too_big_payloads() {
+ // Ensure that if we call `construct_onion_packet` and friends where payloads are too large for
+ // the allotted packet length, we'll fail to construct. Previously, senders would happily
+ // construct invalid packets by array-shifting the final node's HMAC out of the packet when
+ // adding an intermediate onion layer, causing the receiver to error with "final payload
+ // provided for us as an intermediate node."
+ let secp_ctx = Secp256k1::new();
+ let bob = crate::sign::KeysManager::new(&[2; 32], 42, 42);
+ let bob_pk = PublicKey::from_secret_key(&secp_ctx, &bob.get_node_secret_key());
+ let charlie = crate::sign::KeysManager::new(&[3; 32], 42, 42);
+ let charlie_pk = PublicKey::from_secret_key(&secp_ctx, &charlie.get_node_secret_key());
+
+ let (
+ session_priv, total_amt_msat, cur_height, mut recipient_onion, keysend_preimage, payment_hash,
+ prng_seed, hops, ..
+ ) = payment_onion_args(bob_pk, charlie_pk);
+
+ // Ensure the onion will not fit all the payloads by adding a large custom TLV.
+ recipient_onion.custom_tlvs.push((13377331, vec![0; 1156]));
+
+ let path = Path { hops, blinded_tail: None, };
+ let onion_keys = super::onion_utils::construct_onion_keys(&secp_ctx, &path, &session_priv).unwrap();
+ let (onion_payloads, ..) = super::onion_utils::build_onion_payloads(
+ &path, total_amt_msat, recipient_onion, cur_height + 1, &Some(keysend_preimage)
+ ).unwrap();
+
+ assert!(super::onion_utils::construct_onion_packet(
+ onion_payloads, onion_keys, prng_seed, &payment_hash
+ ).is_err());
+ }
+
#[test]
fn test_peel_payment_onion() {
use super::*;
let mut pos = 0;
for (i, (payload, keys)) in payloads.iter().zip(onion_keys.iter()).enumerate() {
- if i == payloads.len() - 1 { break; }
-
let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
for _ in 0..(packet_data.len() - pos) { // TODO: Batch this.
let mut dummy = [0; 1];
return Err(());
}
+ if i == payloads.len() - 1 { break; }
+
res.resize(pos, 0u8);
chacha.process_in_place(&mut res);
}
if hmac == [0; 32] {
#[cfg(test)]
{
- // In tests, make sure that the initial onion packet data is, at least, non-0.
- // We could do some fancy randomness test here, but, ehh, whatever.
- // This checks for the issue where you can calculate the path length given the
- // onion data as all the path entries that the originator sent will be here
- // as-is (and were originally 0s).
- // Of course reverse path calculation is still pretty easy given naive routing
- // algorithms, but this fixes the most-obvious case.
- let mut next_bytes = [0; 32];
- chacha_stream.read_exact(&mut next_bytes).unwrap();
- assert_ne!(next_bytes[..], [0; 32][..]);
- chacha_stream.read_exact(&mut next_bytes).unwrap();
- assert_ne!(next_bytes[..], [0; 32][..]);
+ if chacha_stream.read.position() < hop_data.len() as u64 - 64 {
+ // In tests, make sure that the initial onion packet data is, at least, non-0.
+ // We could do some fancy randomness test here, but, ehh, whatever.
+ // This checks for the issue where you can calculate the path length given the
+ // onion data as all the path entries that the originator sent will be here
+ // as-is (and were originally 0s).
+ // Of course reverse path calculation is still pretty easy given naive routing
+ // algorithms, but this fixes the most-obvious case.
+ let mut next_bytes = [0; 32];
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
+ }
}
return Ok((msg, None)); // We are the final destination for this packet
} else {
use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, commit_tx_fee_msat, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI};
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
use crate::ln::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures};
-use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
+use crate::ln::{msgs, ChannelId, PaymentHash, PaymentSecret, PaymentPreimage};
use crate::ln::msgs::ChannelMessageHandler;
+use crate::ln::onion_utils;
use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
use crate::util::ser::Writeable;
use crate::util::string::UntrustedString;
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::{Secp256k1, SecretKey};
use crate::prelude::*;
+use crate::ln::functional_test_utils;
use crate::ln::functional_test_utils::*;
use crate::routing::gossip::NodeId;
#[cfg(feature = "std")]
let nodes_0_deserialized;
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
// Serialize the ChannelManager prior to sending payments
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
mine_transaction(&nodes[1], &as_commitment_tx);
- let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(bs_htlc_claim_txn.len(), 1);
- check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
+ let bs_htlc_claim_txn = {
+ let mut txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(txn.len(), 2);
+ check_spends!(txn[0], funding_tx);
+ check_spends!(txn[1], as_commitment_tx);
+ txn.pop().unwrap()
+ };
if !confirm_before_reload {
mine_transaction(&nodes[0], &as_commitment_tx);
+ let txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ assert_eq!(txn[0].txid(), as_commitment_tx.txid());
}
- mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
+ mine_transaction(&nodes[0], &bs_htlc_claim_txn);
expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false);
connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
};
check_spends!(first_htlc_timeout_tx, as_commitment_tx);
check_spends!(second_htlc_timeout_tx, as_commitment_tx);
- if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
+ if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output {
confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
} else {
confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
// after the commitment transaction, so always connect the commitment transaction.
mine_transaction(&nodes[0], &bs_commitment_tx[0]);
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[0].tx_broadcaster.txn_broadcast();
+ }
mine_transaction(&nodes[1], &bs_commitment_tx[0]);
if !use_dust {
connect_blocks(&nodes[0], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
connect_blocks(&nodes[1], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
assert_eq!(as_htlc_timeout.len(), 1);
+ check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
mine_transaction(&nodes[0], &as_htlc_timeout[0]);
- // nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
- nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
mine_transaction(&nodes[1], &as_htlc_timeout[0]);
}
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[0].tx_broadcaster.txn_broadcast();
+ }
// Create a new channel on which to retry the payment before we fail the payment via the
// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(node_txn.len(), 3);
- assert_eq!(node_txn[0].txid(), node_txn[1].txid());
- check_spends!(node_txn[1], funding_tx);
- check_spends!(node_txn[2], node_txn[1]);
- let timeout_txn = vec![node_txn[2].clone()];
+ let (commitment_tx, htlc_timeout_tx) = {
+ let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(txn.len(), 2);
+ check_spends!(txn[0], funding_tx);
+ check_spends!(txn[1], txn[0]);
+ (txn.remove(0), txn.remove(0))
+ };
nodes[1].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
- connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[1].clone()]));
+ mine_transaction(&nodes[1], &commitment_tx);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
- let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(claim_txn.len(), 1);
- check_spends!(claim_txn[0], node_txn[1]);
+ let htlc_success_tx = {
+ let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], commitment_tx);
+ txn.pop().unwrap()
+ };
- connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[1].clone()]));
+ mine_transaction(&nodes[0], &commitment_tx);
if confirm_commitment_tx {
connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
}
- let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] });
+ let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] });
if payment_timeout {
assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
// We really want std::thread::scope, but it's not stable until 1.63. Until then, we get unsafe.
let node_ref = NodePtr::from_node(&nodes[0]);
move || {
+ let _ = &node_ref; // Capture the whole `NodePtr` (which is `Send`), not just its raw-pointer field.
let node_a = unsafe { &*node_ref.0 };
while Instant::now() < end_time {
node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
check_closed_broadcast(&nodes[2], 1, true);
check_added_monitors(&nodes[2], 1);
}
+
+#[test]
+fn peel_payment_onion_custom_tlvs() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ let secp_ctx = Secp256k1::new();
+
+ let amt_msat = 1000;
+ let payment_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(),
+ TEST_FINAL_CLTV, false);
+ let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+ let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
+ let recipient_onion = RecipientOnionFields::spontaneous_empty()
+ .with_custom_tlvs(vec![(414141, vec![42; 1200])]).unwrap();
+ let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
+ let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted");
+ let keysend_preimage = PaymentPreimage([42; 32]);
+ let payment_hash = PaymentHash(Sha256::hash(&keysend_preimage.0).to_byte_array());
+
+ let (onion_routing_packet, first_hop_msat, cltv_expiry) = onion_utils::create_payment_onion(
+ &secp_ctx, &route.paths[0], &session_priv, amt_msat, recipient_onion.clone(),
+ nodes[0].best_block_info().1, &payment_hash, &Some(keysend_preimage), prng_seed
+ ).unwrap();
+
+ let update_add = msgs::UpdateAddHTLC {
+ channel_id: ChannelId([0; 32]),
+ htlc_id: 42,
+ amount_msat: first_hop_msat,
+ payment_hash,
+ cltv_expiry,
+ skimmed_fee_msat: None,
+ onion_routing_packet,
+ blinding_point: None,
+ };
+ let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
+ &update_add, &&chanmon_cfgs[1].keys_manager, &&chanmon_cfgs[1].logger, &secp_ctx,
+ nodes[1].best_block_info().1, true, false
+ ).unwrap();
+ assert_eq!(peeled_onion.incoming_amt_msat, Some(amt_msat));
+ match peeled_onion.routing {
+ PendingHTLCRouting::ReceiveKeysend {
+ payment_data, payment_metadata, custom_tlvs, ..
+ } => {
+ #[cfg(not(c_bindings))]
+ assert_eq!(&custom_tlvs, recipient_onion.custom_tlvs());
+ #[cfg(c_bindings)]
+ assert_eq!(custom_tlvs, recipient_onion.custom_tlvs());
+ assert!(payment_metadata.is_none());
+ assert!(payment_data.is_none());
+ },
+ _ => panic!()
+ }
+}
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (SocketAddress::MAX_LEN as u32 + 1) / 2;
- #[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
// smaller than 100:
confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
} else {
connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
- let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(bs_htlc_timeout_tx.len(), 1);
- confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+ let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+ let bs_htlc_timeout_tx = txn.pop().unwrap();
+ confirm_transaction(&nodes[1], &bs_htlc_timeout_tx);
}
} else {
confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
mine_transaction(&nodes[0], &commitment_tx_b);
mine_transaction(&nodes[1], &commitment_tx_b);
+ if nodes[1].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[1].tx_broadcaster.txn_broadcast();
+ }
// Provide the preimage now, such that we only claim from the holder commitment (since it's
// currently confirmed) and not the counterparty's.
// commitment (still unrevoked) is the currently confirmed closing transaction.
assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
}
+
+fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_counterparty_commitment: bool) {
+ // Tests that a node will retry broadcasting its own commitment after seeing a confirmed
+ // counterparty commitment be reorged out.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ if revoked_counterparty_commitment {
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+ }
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut config = test_default_channel_config();
+ if anchors {
+ config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+ config.manually_accept_inbound_channels = true;
+ }
+ let persister;
+ let new_chain_monitor;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+ let nodes_1_deserialized;
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Route a payment so we have an HTLC to claim as well.
+ let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ if revoked_counterparty_commitment {
+ // Trigger a fee update such that we advance the state. We will have B broadcast its state
+ // without the fee update.
+ let serialized_node = nodes[1].node.encode();
+ let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
+
+ *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() += 1;
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+
+ let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
+ commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);
+
+ reload_node!(
+ nodes[1], config, &serialized_node, &[&serialized_monitor], persister, new_chain_monitor, nodes_1_deserialized
+ );
+ }
+
+ // Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A.
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+ check_closed_broadcast(&nodes[0], 1, true);
+ check_added_monitors(&nodes[0], 1);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+
+ {
+ let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+ if anchors {
+ assert_eq!(txn.len(), 1);
+ let commitment_tx_a = txn.pop().unwrap();
+ check_spends!(commitment_tx_a, funding_tx);
+ } else {
+ assert_eq!(txn.len(), 2);
+ let htlc_tx_a = txn.pop().unwrap();
+ let commitment_tx_a = txn.pop().unwrap();
+ check_spends!(commitment_tx_a, funding_tx);
+ check_spends!(htlc_tx_a, commitment_tx_a);
+ }
+ };
+
+ // B will also broadcast its own commitment.
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_added_monitors(&nodes[1], 1);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
+
+ let commitment_b = {
+ let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ let tx = txn.pop().unwrap();
+ check_spends!(tx, funding_tx);
+ tx
+ };
+
+ // Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B.
+ mine_transaction(&nodes[0], &commitment_b);
+ {
+ let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ // `commitment_tx_a` and `htlc_tx_a` are rebroadcast because the best block was
+ // updated prior to seeing `commitment_b`.
+ assert_eq!(txn.len(), if anchors { 2 } else { 3 });
+ check_spends!(txn.last().unwrap(), commitment_b);
+ } else {
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], commitment_b);
+ }
+ }
+
+ // Disconnect the block, allowing A to retry its own commitment. Note that we connect two
+ // blocks, one to get us back to the original height, and another to retry our pending claims.
+ disconnect_blocks(&nodes[0], 1);
+ connect_blocks(&nodes[0], 2);
+ {
+ let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ if anchors {
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ } else {
+ assert_eq!(txn.len(), 2);
+ check_spends!(txn[0], txn[1]); // HTLC timeout A
+ check_spends!(txn[1], funding_tx); // Commitment A
+ assert_ne!(txn[1].txid(), commitment_b.txid());
+ }
+ }
+}
+
+#[test]
+fn test_retries_own_commitment_broadcast_after_reorg() {
+ do_test_retries_own_commitment_broadcast_after_reorg(false, false);
+ do_test_retries_own_commitment_broadcast_after_reorg(false, true);
+ do_test_retries_own_commitment_broadcast_after_reorg(true, false);
+ do_test_retries_own_commitment_broadcast_after_reorg(true, true);
+}
fn peer_disconnected(&self, their_node_id: &PublicKey) {
match self.message_recipients.lock().unwrap().remove(their_node_id) {
Some(OnionMessageRecipient::ConnectedPeer(..)) => {},
- _ => debug_assert!(false),
+ Some(_) => debug_assert!(false),
+ None => {},
}
}
pub struct DirectedChannelInfo<'a> {
channel: &'a ChannelInfo,
direction: &'a ChannelUpdateInfo,
- htlc_maximum_msat: u64,
- effective_capacity: EffectiveCapacity,
- /// Outbound from the perspective of `node_one`.
- ///
- /// If true, the channel is considered to be outbound from `node_one` perspective.
- /// If false, the channel is considered to be outbound from `node_two` perspective.
- ///
- /// [`ChannelInfo::node_one`]
- /// [`ChannelInfo::node_two`]
- outbound: bool,
+ /// The direction this channel is in - if set, it indicates that we're traversing the channel
+ /// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`].
+ from_node_one: bool,
}
impl<'a> DirectedChannelInfo<'a> {
#[inline]
- fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, outbound: bool) -> Self {
- let mut htlc_maximum_msat = direction.htlc_maximum_msat;
- let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
-
- let effective_capacity = match capacity_msat {
- Some(capacity_msat) => {
- htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
- EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
- },
- None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
- };
-
- Self {
- channel, direction, htlc_maximum_msat, effective_capacity, outbound
- }
+ fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self {
+ Self { channel, direction, from_node_one }
}
/// Returns information for the channel.
#[inline]
pub fn channel(&self) -> &'a ChannelInfo { self.channel }
- /// Returns the maximum HTLC amount allowed over the channel in the direction.
- #[inline]
- pub fn htlc_maximum_msat(&self) -> u64 {
- self.htlc_maximum_msat
- }
-
/// Returns the [`EffectiveCapacity`] of the channel in the direction.
///
/// This is either the total capacity from the funding transaction, if known, or the
/// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known,
/// otherwise.
+ #[inline]
pub fn effective_capacity(&self) -> EffectiveCapacity {
- self.effective_capacity
+ let mut htlc_maximum_msat = self.direction().htlc_maximum_msat;
+ let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
+
+ match capacity_msat {
+ Some(capacity_msat) => {
+ htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
+ EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
+ },
+ None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
+ }
}
/// Returns information for the direction.
/// Returns the `node_id` of the source hop.
///
/// Refers to the `node_id` forwarding the payment to the next hop.
- pub(super) fn source(&self) -> &'a NodeId { if self.outbound { &self.channel.node_one } else { &self.channel.node_two } }
+ #[inline]
+ pub(super) fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
/// Returns the `node_id` of the target hop.
///
/// Refers to the `node_id` receiving the payment from the previous hop.
- pub(super) fn target(&self) -> &'a NodeId { if self.outbound { &self.channel.node_two } else { &self.channel.node_one } }
+ #[inline]
+ pub(super) fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
}
impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
}
/// A channel descriptor for a hop along a payment path.
+///
+/// While this generally comes from BOLT 11's `r` field, this struct includes more fields than are
+/// available in BOLT 11. Thus, encoding and decoding this via `lightning-invoice` is lossy, as
+/// fields not supported in BOLT 11 will be stripped.
#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct RouteHintHop {
/// The node_id of the non-target end of the route
});
#[derive(Eq, PartialEq)]
+#[repr(align(64))] // Force the size to 64 bytes
struct RouteGraphNode {
node_id: NodeId,
- lowest_fee_to_node: u64,
- total_cltv_delta: u32,
+ score: u64,
// The maximum value a yet-to-be-constructed payment path might flow through this node.
// This value is upper-bounded by us by:
// - how much is needed for a path being constructed
// - how much value channels following this node (up to the destination) can contribute,
// considering their capacity and fees
value_contribution_msat: u64,
- /// The effective htlc_minimum_msat at this hop. If a later hop on the path had a higher HTLC
- /// minimum, we use it, plus the fees required at each earlier hop to meet it.
- path_htlc_minimum_msat: u64,
- /// All penalties incurred from this hop on the way to the destination, as calculated using
- /// channel scoring.
- path_penalty_msat: u64,
+ total_cltv_delta: u32,
/// The number of hops walked up to this node.
path_length_to_node: u8,
}
impl cmp::Ord for RouteGraphNode {
fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering {
- let other_score = cmp::max(other.lowest_fee_to_node, other.path_htlc_minimum_msat)
- .saturating_add(other.path_penalty_msat);
- let self_score = cmp::max(self.lowest_fee_to_node, self.path_htlc_minimum_msat)
- .saturating_add(self.path_penalty_msat);
- other_score.cmp(&self_score).then_with(|| other.node_id.cmp(&self.node_id))
+ other.score.cmp(&self.score).then_with(|| other.node_id.cmp(&self.node_id))
}
}
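The reversed operand order in `cmp` above is what turns Rust's max-heap `BinaryHeap` into a min-heap over `score`, so the cheapest node is popped first. A minimal, self-contained sketch of the same idiom (names are illustrative, not LDK APIs):

use std::cmp::Ordering;
use std::collections::BinaryHeap;

#[derive(Eq, PartialEq)]
struct MinScored(u64);

impl Ord for MinScored {
    // Comparing `other` to `self` (rather than the reverse) inverts the ordering,
    // so the max-heap's `pop()` returns the smallest score.
    fn cmp(&self, other: &Self) -> Ordering { other.0.cmp(&self.0) }
}
impl PartialOrd for MinScored {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.push(MinScored(3));
    heap.push(MinScored(1));
    heap.push(MinScored(2));
    assert_eq!(heap.pop().unwrap().0, 1); // lowest score first, as the traversal requires
}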
}
}
+// While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved
+// substantially when it is laid out at exactly 64 bytes.
+//
+// Thus, we use `#[repr(align(64))]` on the struct to pad it out to exactly 64 bytes and check
+// that it stays 64 bytes here.
+#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
+const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::<RouteGraphNode>();
+#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
+const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::<RouteGraphNode>() - 64;
+
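The two consts above are a compile-time size assertion: `usize` subtraction underflows during const evaluation, so compilation fails if `RouteGraphNode` ever grows past, or shrinks below, 64 bytes. A standalone sketch of the trick, using a hypothetical struct:

#[repr(align(64))] // alignment rounds the size up to a multiple of 64 bytes
struct Example(u64);

// Each bound is enforced by one const: an underflowing subtraction is a compile error.
const _EXAMPLE_NOT_OVER_64: usize = 64 - core::mem::size_of::<Example>();  // errors if > 64
const _EXAMPLE_NOT_UNDER_64: usize = core::mem::size_of::<Example>() - 64; // errors if < 64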
/// A wrapper around the various hop representations.
///
/// Can be used to examine the properties of a hop,
/// A hop from the payer, where the outbound liquidity is known.
FirstHop {
/// Channel details of the first hop
- /// [`ChannelDetails::get_outbound_payment_scid`] is assumed
- /// to always return `Some(scid)`
- /// this assumption is checked in [`find_route`] method.
- details: &'a ChannelDetails,
- /// The node id of the payer.
///
- /// Can be accessed via `source` method.
- node_id: NodeId
+ /// [`ChannelDetails::get_outbound_payment_scid`] MUST be `Some` (indicating the channel
+ /// has been funded and is able to pay), and accessor methods may panic otherwise.
+ ///
+ /// [`find_route`] validates this prior to constructing a [`CandidateRouteHop`].
+ details: &'a ChannelDetails,
+ /// The node id of the payer, which is also the source side of this candidate route hop.
+ payer_node_id: &'a NodeId,
},
- /// A hop found in the [`ReadOnlyNetworkGraph`],
- /// where the channel capacity may be unknown.
+ /// A hop found in the [`ReadOnlyNetworkGraph`].
PublicHop {
- /// channel info of the hop.
+ /// Information about the channel, including potentially its capacity and
+ /// direction-specific information.
info: DirectedChannelInfo<'a>,
- /// short_channel_id of the channel.
+ /// The short channel ID of the channel, i.e. the identifier by which we refer to this
+ /// channel.
short_channel_id: u64,
},
- /// A hop to the payee found in the BOLT 11 payment invoice,
- /// though not necessarily a direct
- /// channel.
+ /// A private hop communicated by the payee, generally via a BOLT 11 invoice.
+ ///
+ /// Because BOLT 11 route hints can take multiple hops to get to the destination, this may not
+ /// terminate at the payee.
PrivateHop {
- /// Hint provides information about a private hop,
- /// needed while routing through a private
- /// channel.
+ /// Information about the private hop communicated via BOLT 11.
hint: &'a RouteHintHop,
- /// Node id of the next hop in route.
- target_node_id: NodeId
+ /// Node id of the next hop in the BOLT 11 route hint.
+ target_node_id: &'a NodeId
},
- /// The payee's identity is concealed behind
- /// blinded paths provided in a BOLT 12 invoice.
+ /// A blinded path which starts with an introduction point and ultimately terminates with the
+ /// payee.
+ ///
+ /// Because we don't know the payee's identity, [`CandidateRouteHop::target`] will return
+ /// `None` in this state.
+ ///
+ /// Because blinded paths are "all or nothing", and we cannot use just one part of a blinded
+ /// path, the full path is treated as a single [`CandidateRouteHop`].
Blinded {
- /// Hint provides information about a blinded hop,
- /// needed while routing through a blinded path.
- /// `BlindedPayInfo` provides information needed about the
- /// payment while routing through a blinded path.
- /// `BlindedPath` is the blinded path to the destination.
+ /// Information about the blinded path including the fee, HTLC amount limits, and
+ /// cryptographic material required to build an HTLC through the given path.
hint: &'a (BlindedPayInfo, BlindedPath),
/// Index of the hint in the original list of blinded hints.
- /// Provided to uniquely identify a hop as we are
- /// route building.
+ ///
+ /// This is used to cheaply uniquely identify this blinded path, even though we don't have
+ /// a short channel ID for this hop.
hint_idx: usize,
},
- /// Similar to [`Self::Blinded`], but the path here
- /// has 1 blinded hop. `BlindedPayInfo` provided
- /// for 1-hop blinded paths is ignored
- /// because it is meant to apply to the hops *between* the
- /// introduction node and the destination.
- /// Useful for tracking that we need to include a blinded
- /// path at the end of our [`Route`].
+ /// Similar to [`Self::Blinded`], but the path here only has one hop.
+ ///
+ /// While we treat this similarly to [`CandidateRouteHop::Blinded`] in many respects (e.g.
+ /// returning `None` from [`CandidateRouteHop::target`]), in this case we do actually know the
+ /// payee's identity - it's the introduction node!
+ ///
+ /// [`BlindedPayInfo`] provided for 1-hop blinded paths is ignored because it is meant to apply
+ /// to the hops *between* the introduction node and the destination.
+ ///
+ /// This primarily exists to track that we need to include a blinded path at the end of our
+ /// [`Route`], even though it doesn't actually add an additional hop in the payment.
OneHopBlinded {
- /// Hint provides information about a single blinded hop,
- /// needed while routing through a one hop blinded path.
- /// `BlindedPayInfo` is ignored here.
- /// `BlindedPath` is the blinded path to the destination.
+ /// Information about the blinded path including the fee, HTLC amount limits, and
+ /// cryptographic material required to build an HTLC terminating with the given path.
+ ///
+ /// Note that the [`BlindedPayInfo`] is ignored here.
hint: &'a (BlindedPayInfo, BlindedPath),
/// Index of the hint in the original list of blinded hints.
- /// Provided to uniquely identify a hop as we are route building.
+ ///
+ /// This is used to cheaply uniquely identify this blinded path, even though we don't have
+ /// a short channel ID for this hop.
hint_idx: usize,
},
}
impl<'a> CandidateRouteHop<'a> {
- /// Returns short_channel_id if known.
- /// For `FirstHop` we assume [`ChannelDetails::get_outbound_payment_scid`] is always set, this assumption is checked in
- /// [`find_route`] method.
- /// For `Blinded` and `OneHopBlinded` we return `None` because next hop is not known.
- pub fn short_channel_id(&self) -> Option<u64> {
+ /// Returns the short channel ID for this hop, if one is known.
+ ///
+ /// This SCID could be an alias or a globally unique SCID, and thus is only expected to
+ /// uniquely identify this channel in conjunction with the [`CandidateRouteHop::source`].
+ ///
+ /// Returns `Some` as long as the candidate is a [`CandidateRouteHop::PublicHop`], a
+ /// [`CandidateRouteHop::PrivateHop`] from a BOLT 11 route hint, or a
+ /// [`CandidateRouteHop::FirstHop`] with a known [`ChannelDetails::get_outbound_payment_scid`]
+ /// (which is always true for channels which are funded and ready for use).
+ ///
+ /// In other words, this should always return `Some` as long as the candidate hop is not a
+ /// [`CandidateRouteHop::Blinded`] or a [`CandidateRouteHop::OneHopBlinded`].
+ ///
+ /// Note that this is deliberately not public as it is somewhat of a footgun because it doesn't
+ /// define a global namespace.
+ #[inline]
+ fn short_channel_id(&self) -> Option<u64> {
match self {
CandidateRouteHop::FirstHop { details, .. } => details.get_outbound_payment_scid(),
CandidateRouteHop::PublicHop { short_channel_id, .. } => Some(*short_channel_id),
}
}
+ /// Returns the globally unique short channel ID for this hop, if one is known.
+ ///
+ /// This only returns `Some` if the channel is public (either our own, or one we've learned
+ /// from the public network graph), and thus the short channel ID we have for this channel is
+ /// globally unique and identifies this channel in a global namespace.
+ #[inline]
+ pub fn globally_unique_short_channel_id(&self) -> Option<u64> {
+ match self {
+ CandidateRouteHop::FirstHop { details, .. } => if details.is_public { details.short_channel_id } else { None },
+ CandidateRouteHop::PublicHop { short_channel_id, .. } => Some(*short_channel_id),
+ CandidateRouteHop::PrivateHop { .. } => None,
+ CandidateRouteHop::Blinded { .. } => None,
+ CandidateRouteHop::OneHopBlinded { .. } => None,
+ }
+ }
+
// NOTE: This may alloc memory so avoid calling it in a hot code path.
fn features(&self) -> ChannelFeatures {
match self {
}
}
- /// Returns cltv_expiry_delta for this hop.
+ /// Returns the required difference in HTLC CLTV expiry between the [`Self::source`] and the
+ /// next-hop for an HTLC taking this hop.
+ ///
+ /// This is the time that the node(s) in this hop have to claim the HTLC on-chain if the
+ /// next-hop goes on chain with a payment preimage.
+ #[inline]
pub fn cltv_expiry_delta(&self) -> u32 {
match self {
CandidateRouteHop::FirstHop { .. } => 0,
}
}
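As a worked example of the delta (numbers hypothetical): if the next hop can claim the HTLC on-chain up to block height 800_000, an upstream hop advertising a `cltv_expiry_delta` of 40 needs its incoming HTLC to remain claimable until height 800_040. A sketch:

// Illustrative helper (not an LDK API): the incoming HTLC's expiry must exceed the
// outgoing expiry by the hop's delta, leaving time to claim on-chain if the next
// hop reveals the preimage there.
fn min_incoming_expiry(outgoing_expiry_height: u32, cltv_expiry_delta: u32) -> u32 {
    outgoing_expiry_height + cltv_expiry_delta
}

fn main() {
    assert_eq!(min_incoming_expiry(800_000, 40), 800_040);
}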
- /// Returns the htlc_minimum_msat for this hop.
+ /// Returns the minimum amount that can be sent over this hop, in millisatoshis.
+ #[inline]
pub fn htlc_minimum_msat(&self) -> u64 {
match self {
CandidateRouteHop::FirstHop { details, .. } => details.next_outbound_htlc_minimum_msat,
}
}
- /// Returns the fees for this hop.
+ /// Returns the fees that must be paid to route an HTLC over this channel.
+ #[inline]
pub fn fees(&self) -> RoutingFees {
match self {
CandidateRouteHop::FirstHop { .. } => RoutingFees {
}
}
+ /// Fetch the effective capacity of this hop.
+ ///
+ /// Note that this may be somewhat expensive, so calls to this should be limited and results
+ /// cached!
fn effective_capacity(&self) -> EffectiveCapacity {
match self {
CandidateRouteHop::FirstHop { details, .. } => EffectiveCapacity::ExactLiquidity {
}
}
- /// Returns the id of this hop.
- /// For `Blinded` and `OneHopBlinded` we return `CandidateHopId::Blinded` with `hint_idx` because we don't know the channel id.
- /// For any other option we return `CandidateHopId::Clear` because we know the channel id and the direction.
- pub fn id(&self) -> CandidateHopId {
+ /// Returns an ID describing the given hop.
+ ///
+ /// See the docs on [`CandidateHopId`] for when this is, or is not, unique.
+ #[inline]
+ fn id(&self) -> CandidateHopId {
match self {
CandidateRouteHop::Blinded { hint_idx, .. } => CandidateHopId::Blinded(*hint_idx),
CandidateRouteHop::OneHopBlinded { hint_idx, .. } => CandidateHopId::Blinded(*hint_idx),
}
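For the non-blinded arms (elided here), the id documented below pairs the SCID with a direction bit. A hedged sketch of that scheme (the helper name is hypothetical):

// A channel's two directions share one SCID, so the pair (scid, source < target)
// distinguishes them: the bool records which way we traverse the channel.
fn clear_hop_id(scid: u64, source: &NodeId, target: &NodeId) -> (u64, bool) {
    (scid, source < target)
}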
/// Returns the source node id of the current hop.
///
- /// Source node id refers to the hop forwarding the payment.
+ /// Source node id refers to the node forwarding the HTLC through this hop.
///
- /// For `FirstHop` we return payer's node id.
+ /// For [`Self::FirstHop`] we return the payer's node id.
+ #[inline]
pub fn source(&self) -> NodeId {
match self {
- CandidateRouteHop::FirstHop { node_id, .. } => *node_id,
+ CandidateRouteHop::FirstHop { payer_node_id, .. } => **payer_node_id,
CandidateRouteHop::PublicHop { info, .. } => *info.source(),
CandidateRouteHop::PrivateHop { hint, .. } => hint.src_node_id.into(),
CandidateRouteHop::Blinded { hint, .. } => hint.1.introduction_node_id.into(),
}
/// Returns the target node id of this hop, if known.
///
- /// Target node id refers to the hop receiving the payment.
+ /// Target node id refers to the node receiving the HTLC after this hop.
+ ///
+ /// For [`Self::Blinded`] we return `None` because the ultimate destination after the blinded
+ /// path is unknown.
///
- /// For `Blinded` and `OneHopBlinded` we return `None` because next hop is blinded.
- pub fn target(&self) -> Option<NodeId> {
+ /// For [`Self::OneHopBlinded`] we return `None` because the target is the same as the source,
+ /// and such a return value would be somewhat nonsensical.
+ #[inline]
+ pub fn target(&self) -> Option<NodeId> {
match self {
CandidateRouteHop::FirstHop { details, .. } => Some(details.counterparty.node_id.into()),
CandidateRouteHop::PublicHop { info, .. } => Some(*info.target()),
- CandidateRouteHop::PrivateHop { target_node_id, .. } => Some(*target_node_id),
+ CandidateRouteHop::PrivateHop { target_node_id, .. } => Some(**target_node_id),
CandidateRouteHop::Blinded { .. } => None,
CandidateRouteHop::OneHopBlinded { .. } => None,
}
}
}
-/// A wrapper around the various hop id representations.
+/// A unique(ish) identifier for a specific [`CandidateRouteHop`].
+///
+/// For blinded paths, this ID is unique only within a given [`find_route`] call.
+///
+/// For other hops, because SCIDs between private channels and public channels can conflict, this
+/// isn't guaranteed to be unique at all.
///
-/// `CandidateHopId::Clear` is used to identify a hop with a known short channel id and direction.
-/// `CandidateHopId::Blinded` is used to identify a blinded hop `hint_idx`.
+/// For our uses, this is generally fine, but it is not public as it is otherwise a rather
+/// difficult-to-use API.
#[derive(Clone, Copy, Eq, Hash, Ord, PartialOrd, PartialEq)]
-pub enum CandidateHopId {
+enum CandidateHopId {
/// Contains (scid, src_node_id < target_node_id)
Clear((u64, bool)),
/// Index of the blinded route hint in [`Payee::Blinded::route_hints`].
/// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
/// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
#[derive(Clone)]
+#[repr(C)] // Force fields to appear in the order we define them.
struct PathBuildingHop<'a> {
candidate: CandidateRouteHop<'a>,
- fee_msat: u64,
-
- /// All the fees paid *after* this channel on the way to the destination
- next_hops_fee_msat: u64,
- /// Fee paid for the use of the current channel (see candidate.fees()).
- /// The value will be actually deducted from the counterparty balance on the previous link.
- hop_use_fee_msat: u64,
+ /// If we've already processed a node as the best node, we shouldn't process it again. Normally
+ /// we'd just ignore it if we did as all channels would have a higher new fee, but because we
+ /// may decrease the amounts in use as we walk the graph, the actual calculated fee may
+ /// decrease as well. Thus, we have to explicitly track which nodes have been processed and
+ /// avoid processing them again.
+ was_processed: bool,
/// Used to compare channels when choosing one for routing.
/// Includes paying for the use of a hop and the following hops, as well as
/// an estimated cost of reaching this hop.
/// All penalties incurred from this channel on the way to the destination, as calculated using
/// channel scoring.
path_penalty_msat: u64,
- /// If we've already processed a node as the best node, we shouldn't process it again. Normally
- /// we'd just ignore it if we did as all channels would have a higher new fee, but because we
- /// may decrease the amounts in use as we walk the graph, the actual calculated fee may
- /// decrease as well. Thus, we have to explicitly track which nodes have been processed and
- /// avoid processing them again.
- was_processed: bool,
+
+ // The last 16 bytes are on the next cache line by default in glibc's malloc. Thus, we should
+ // only place fields which are not hot there. Luckily, the next three fields are only read if
+ // we end up on the selected path, and only in the final path layout phase, so we don't care
+ // too much if reading them is slow.
+
+ fee_msat: u64,
+
+ /// All the fees paid *after* this channel on the way to the destination
+ next_hops_fee_msat: u64,
+ /// Fee paid for the use of the current channel (see candidate.fees()).
+ /// The value will be actually deducted from the counterparty balance on the previous link.
+ hop_use_fee_msat: u64,
+
#[cfg(all(not(ldk_bench), any(test, fuzzing)))]
// In tests, we apply further sanity checks on cases where we skip nodes we already processed
// to ensure it is specifically in cases where the fee has gone down because of a decrease in
value_contribution_msat: u64,
}
+// Checks that the entries in the `find_route` `dist` map fit in (exactly) two standard x86-64
+// cache lines. Sadly, they're not guaranteed to actually lie on a cache line (and in fact,
+// generally won't, because at least glibc's malloc will align to a nice, big, round
+// boundary...plus 16), but at least it will reduce the amount of data we'll need to load.
+//
+// Note that these assertions only pass on somewhat recent rustc, and thus are gated on the
+// ldk_bench flag.
+#[cfg(ldk_bench)]
+const _NODE_MAP_SIZE_TWO_CACHE_LINES: usize = 128 - core::mem::size_of::<(NodeId, PathBuildingHop)>();
+#[cfg(ldk_bench)]
+const _NODE_MAP_SIZE_EXACTLY_CACHE_LINES: usize = core::mem::size_of::<(NodeId, PathBuildingHop)>() - 128;
+
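The `#[repr(C)]` above pins fields in declaration order (the default repr may reorder them), which is what makes the hot/cold split across cache lines possible. A small sketch of the pattern, with hypothetical fields:

#[repr(C)] // keep fields exactly in this order
struct HotCold {
    hot_score: u64,             // read on every heap operation
    hot_flags: u64,             // read on every heap operation
    cold_debug_info: [u8; 112], // only read once a path is finalized
}

// Same compile-time size check as above: errors if the struct outgrows two cache lines.
const _HOT_COLD_TWO_LINES: usize = 128 - core::mem::size_of::<HotCold>();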
impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
let mut debug_struct = f.debug_struct("PathBuildingHop");
}
}
+ let mut private_hop_key_cache = HashMap::with_capacity(
+ payment_params.payee.unblinded_route_hints().iter().map(|path| path.0.len()).sum()
+ );
+
+ // Because we store references to private hop node_ids in `dist`, below, we need them to exist
+ // (as `NodeId`, not `PublicKey`) for the lifetime of `dist`. Thus, we calculate all the keys
+ // we'll need here and simply fetch them when routing.
+ private_hop_key_cache.insert(maybe_dummy_payee_pk, NodeId::from_pubkey(&maybe_dummy_payee_pk));
+ for route in payment_params.payee.unblinded_route_hints().iter() {
+ for hop in route.0.iter() {
+ private_hop_key_cache.insert(hop.src_node_id, NodeId::from_pubkey(&hop.src_node_id));
+ }
+ }
+
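The pattern above, sketched in isolation: compute every owned key once, up-front, so later phases can hold references into the map for as long as it is only read (types simplified, illustrative only):

use std::collections::HashMap;

// Stand-in for an expensive conversion like `NodeId::from_pubkey` (illustrative).
fn derive_key(pk: &[u8; 33]) -> u64 { pk.iter().map(|b| *b as u64).sum() }

fn demo(pubkeys: &[[u8; 33]]) {
    // Convert every key once, up-front, so the owned results live in one place...
    let mut cache: HashMap<[u8; 33], u64> = HashMap::new();
    for pk in pubkeys {
        cache.insert(*pk, derive_key(pk));
    }
    // ...and the routing phase can borrow out of `cache` for its whole duration,
    // since nothing is inserted (and nothing moves) after this point.
    let borrowed: Vec<&u64> = pubkeys.iter().map(|pk| cache.get(pk).unwrap()).collect();
    assert_eq!(borrowed.len(), pubkeys.len());
}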
// The main heap containing all candidate next-hops sorted by their score (max(fee,
// htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of
// adding duplicate entries when we find a better path to a given node.
score_params);
let path_penalty_msat = $next_hops_path_penalty_msat
.saturating_add(channel_penalty_msat);
- let new_graph_node = RouteGraphNode {
- node_id: src_node_id,
- lowest_fee_to_node: total_fee_msat,
- total_cltv_delta: hop_total_cltv_delta,
- value_contribution_msat,
- path_htlc_minimum_msat,
- path_penalty_msat,
- path_length_to_node,
- };
// Update the way of reaching $candidate.source()
// with the given short_channel_id (from $candidate.target()),
.saturating_add(path_penalty_msat);
if !old_entry.was_processed && new_cost < old_cost {
+ let new_graph_node = RouteGraphNode {
+ node_id: src_node_id,
+ score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat),
+ total_cltv_delta: hop_total_cltv_delta,
+ value_contribution_msat,
+ path_length_to_node,
+ };
targets.push(new_graph_node);
old_entry.next_hops_fee_msat = $next_hops_fee_msat;
old_entry.hop_use_fee_msat = hop_use_fee_msat;
// meaning how much will be paid in fees after this node (to the best of our knowledge).
// This data can later be helpful to optimize routing (pay lower fees).
macro_rules! add_entries_to_cheapest_to_target_node {
- ( $node: expr, $node_id: expr, $fee_to_target_msat: expr, $next_hops_value_contribution: expr,
- $next_hops_path_htlc_minimum_msat: expr, $next_hops_path_penalty_msat: expr,
+ ( $node: expr, $node_id: expr, $next_hops_value_contribution: expr,
$next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => {
+ let fee_to_target_msat;
+ let next_hops_path_htlc_minimum_msat;
+ let next_hops_path_penalty_msat;
let skip_node = if let Some(elem) = dist.get_mut(&$node_id) {
let was_processed = elem.was_processed;
elem.was_processed = true;
+ fee_to_target_msat = elem.total_fee_msat;
+ next_hops_path_htlc_minimum_msat = elem.path_htlc_minimum_msat;
+ next_hops_path_penalty_msat = elem.path_penalty_msat;
was_processed
} else {
// Entries are added to dist in add_entry!() when there is a channel from a node.
// Because there are no channels from payee, it will not have a dist entry at this point.
// If we're processing any other node, it will always be the result of a channel from it.
debug_assert_eq!($node_id, maybe_dummy_payee_node_id);
+ fee_to_target_msat = 0;
+ next_hops_path_htlc_minimum_msat = 0;
+ next_hops_path_penalty_msat = 0;
false
};
if !skip_node {
if let Some(first_channels) = first_hop_targets.get(&$node_id) {
for details in first_channels {
- let candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id };
- add_entry!(&candidate, $fee_to_target_msat,
+ let candidate = CandidateRouteHop::FirstHop {
+ details, payer_node_id: &our_node_id,
+ };
+ add_entry!(&candidate, fee_to_target_msat,
$next_hops_value_contribution,
- $next_hops_path_htlc_minimum_msat, $next_hops_path_penalty_msat,
+ next_hops_path_htlc_minimum_msat, next_hops_path_penalty_msat,
$next_hops_cltv_delta, $next_hops_path_length);
}
}
short_channel_id: *chan_id,
};
add_entry!(&candidate,
- $fee_to_target_msat,
+ fee_to_target_msat,
$next_hops_value_contribution,
- $next_hops_path_htlc_minimum_msat,
- $next_hops_path_penalty_msat,
+ next_hops_path_htlc_minimum_msat,
+ next_hops_path_penalty_msat,
$next_hops_cltv_delta, $next_hops_path_length);
}
}
// place where it could be added.
payee_node_id_opt.map(|payee| first_hop_targets.get(&payee).map(|first_channels| {
for details in first_channels {
- let candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id };
+ let candidate = CandidateRouteHop::FirstHop {
+ details, payer_node_id: &our_node_id,
+ };
let added = add_entry!(&candidate, 0, path_value_msat,
0, 0u64, 0, 0).is_some();
log_trace!(logger, "{} direct route to payee via {}",
// If not, targets.pop() will not even let us enter the loop in step 2.
None => {},
Some(node) => {
- add_entries_to_cheapest_to_target_node!(node, payee, 0, path_value_msat, 0, 0u64, 0, 0);
+ add_entries_to_cheapest_to_target_node!(node, payee, path_value_msat, 0, 0);
},
});
sort_first_hop_channels(first_channels, &used_liquidities, recommended_value_msat,
our_node_pubkey);
for details in first_channels {
- let first_hop_candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id};
+ let first_hop_candidate = CandidateRouteHop::FirstHop {
+ details, payer_node_id: &our_node_id,
+ };
let blinded_path_fee = match compute_fees(path_contribution_msat, candidate.fees()) {
Some(fee) => fee,
None => continue
let mut aggregate_path_contribution_msat = path_value_msat;
for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
- let source = NodeId::from_pubkey(&hop.src_node_id);
- let target = NodeId::from_pubkey(&prev_hop_id);
+ let target = private_hop_key_cache.get(&prev_hop_id).unwrap();
if let Some(first_channels) = first_hop_targets.get(&target) {
if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) {
sort_first_hop_channels(first_channels, &used_liquidities,
recommended_value_msat, our_node_pubkey);
for details in first_channels {
- let first_hop_candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id};
+ let first_hop_candidate = CandidateRouteHop::FirstHop {
+ details, payer_node_id: &our_node_id,
+ };
add_entry!(&first_hop_candidate,
aggregate_next_hops_fee_msat, aggregate_path_contribution_msat,
aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat,
sort_first_hop_channels(first_channels, &used_liquidities,
recommended_value_msat, our_node_pubkey);
for details in first_channels {
- let first_hop_candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id};
+ let first_hop_candidate = CandidateRouteHop::FirstHop {
+ details, payer_node_id: &our_node_id,
+ };
add_entry!(&first_hop_candidate,
aggregate_next_hops_fee_msat,
aggregate_path_contribution_msat,
// Both these cases (and other cases except reaching recommended_value_msat) mean that
// paths_collection will be stopped because found_new_path==false.
// This is not necessarily a routing failure.
- 'path_construction: while let Some(RouteGraphNode { node_id, lowest_fee_to_node, total_cltv_delta, mut value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat, path_length_to_node, .. }) = targets.pop() {
+ 'path_construction: while let Some(RouteGraphNode { node_id, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() {
// Since we're going payee-to-payer, hitting our node as a target means we should stop
// traversing the graph and arrange the path out of what we found.
let target = ordered_hops.last().unwrap().0.candidate.target().unwrap_or(maybe_dummy_payee_node_id);
if let Some(first_channels) = first_hop_targets.get(&target) {
for details in first_channels {
- if let Some(scid) = ordered_hops.last().unwrap().0.candidate.short_channel_id() {
- if details.get_outbound_payment_scid().unwrap() == scid {
+ if let CandidateRouteHop::FirstHop { details: last_hop_details, .. }
+ = ordered_hops.last().unwrap().0.candidate
+ {
+ if details.get_outbound_payment_scid() == last_hop_details.get_outbound_payment_scid() {
ordered_hops.last_mut().unwrap().1 = details.counterparty.features.to_context();
features_set = true;
break;
match network_nodes.get(&node_id) {
None => {},
Some(node) => {
- add_entries_to_cheapest_to_target_node!(node, node_id, lowest_fee_to_node,
- value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat,
+ add_entries_to_cheapest_to_target_node!(node, node_id,
+ value_contribution_msat,
total_cltv_delta, path_length_to_node);
},
}
});
for idx in 0..(selected_route.len() - 1) {
if idx + 1 >= selected_route.len() { break; }
- if iter_equal(selected_route[idx].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target())),
- selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target()))) {
+ if iter_equal(selected_route[idx ].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target())),
+ selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target()))) {
let new_value = selected_route[idx].get_value_msat() + selected_route[idx + 1].get_value_msat();
selected_route[idx].update_value_and_recompute_fees(new_value);
selected_route.remove(idx + 1);
fn channel_penalty_msat(
&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
) -> u64 {
- let scid = match candidate.short_channel_id() {
- Some(scid) => scid,
- None => return 0,
- };
- let target = match candidate.target() {
- Some(target) => target,
- None => return 0,
+ let (scid, target) = match candidate {
+ CandidateRouteHop::PublicHop { info, short_channel_id } => {
+ (short_channel_id, info.target())
+ },
+ _ => return 0,
};
let source = candidate.source();
if let Some(penalty) = score_params.manual_node_penalties.get(&target) {
PublicKey::from_secret_key(&secp_ctx, &recipient_privkey())
}
- fn sender_node_id() -> NodeId {
- NodeId::from_pubkey(&sender_pubkey())
- }
-
fn recipient_node_id() -> NodeId {
NodeId::from_pubkey(&recipient_pubkey())
}
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
};
let channel = network_graph.read_only().channel(42).unwrap().to_owned();
- let (info, target) = channel.as_directed_from(&source).unwrap();
+ let (info, _) = channel.as_directed_from(&source).unwrap();
let candidate = CandidateRouteHop::PublicHop {
info,
short_channel_id: 42,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
};
let channel = network_graph.read_only().channel(42).unwrap().to_owned();
- let (info, target) = channel.as_directed_from(&source).unwrap();
+ let (info, _) = channel.as_directed_from(&source).unwrap();
let candidate = CandidateRouteHop::PublicHop {
info,
short_channel_id: 42,
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
}
+ #[test]
fn remembers_historical_failures() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
};
let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
+ let target = target_node_id();
let usage = ChannelUsage {
amount_msat: 100,
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
};
- let network_graph = network_graph.read_only();
- let channel = network_graph.channel(42).unwrap();
- let (info, target) = channel.as_directed_from(&source).unwrap();
- let candidate = CandidateRouteHop::PublicHop {
- info,
- short_channel_id: 42,
- };
- // With no historical data the normal liquidity penalty calculation is used.
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+ {
+ let network_graph = network_graph.read_only();
+ let channel = network_graph.channel(42).unwrap();
+ let (info, _) = channel.as_directed_from(&source).unwrap();
+ let candidate = CandidateRouteHop::PublicHop {
+ info,
+ short_channel_id: 42,
+ };
+
+ // With no historical data the normal liquidity penalty calculation is used.
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+ }
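// Editor's sketch of why the new braces matter: `read_only()` hands back a
// lock guard, and `candidate` borrows `info` out of it, so both must go out of
// scope before the scorer is used mutably below. The same pattern with a bare
// std RwLock:
use std::sync::RwLock;

fn main() {
    let graph = RwLock::new(vec![42u64]);
    {
        let read = graph.read().unwrap();
        assert_eq!(read[0], 42);
    } // read guard dropped here, releasing the lock
    graph.write().unwrap().push(43); // may deadlock if the guard lived on
}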
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
None);
assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, &params),
None);
scorer.payment_path_failed(&payment_path_for_amount(1), 42);
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage_1, &params), 249);
+ {
+ let network_graph = network_graph.read_only();
+ let channel = network_graph.channel(42).unwrap();
+ let (info, _) = channel.as_directed_from(&source).unwrap();
+ let candidate = CandidateRouteHop::PublicHop {
+ info,
+ short_channel_id: 42,
+ };
+
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage_1, &params), 249);
+ }
// The "it failed" increment is 32, where the probability should lie several buckets into
// the first octile.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
// Even after we tell the scorer we definitely have enough available liquidity, it will
// still remember that there was some failure in the past, and assign a non-0 penalty.
scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 105);
+ {
+ let network_graph = network_graph.read_only();
+ let channel = network_graph.channel(42).unwrap();
+ let (info, _) = channel.as_directed_from(&source).unwrap();
+ let candidate = CandidateRouteHop::PublicHop {
+ info,
+ short_channel_id: 42,
+ };
+
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 105);
+ }
// The first points should be decayed just slightly and the last bucket has a new point.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
// gone), and check that we're back to where we started.
SinceEpoch::advance(Duration::from_secs(10 * 16));
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+ {
+ let network_graph = network_graph.read_only();
+ let channel = network_graph.channel(42).unwrap();
+ let (info, _) = channel.as_directed_from(&source).unwrap();
+ let candidate = CandidateRouteHop::PublicHop {
+ info,
+ short_channel_id: 42,
+ };
+
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+ }
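// Editor's arithmetic check on "16 half-lives ensures all data is gone":
// given the 10 * 16 seconds advanced above (apparently a 10-second half-life),
// each half-life halves a bucket's weight with integer flooring, and the
// largest weight used in these tests is 32, so 16 halvings more than suffice.
fn main() {
    let mut weight: u64 = 32;
    for _ in 0..16 {
        weight /= 2;
    }
    assert_eq!(weight, 0);
    // More generally, x >> 16 == 0 for any x below 2^16.
    assert_eq!(65535u64 >> 16, 0);
}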
// Once fully decayed we still have data, but it's all-0s. In the future we may remove the
// data entirely instead.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
};
scorer.payment_path_failed(&payment_path_for_amount(1), 42);
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2050);
- usage.inflight_htlc_msat = 0;
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 866);
+ {
+ let network_graph = network_graph.read_only();
+ let channel = network_graph.channel(42).unwrap();
+ let (info, _) = channel.as_directed_from(&source).unwrap();
+ let candidate = CandidateRouteHop::PublicHop {
+ info,
+ short_channel_id: 42,
+ };
- let usage = ChannelUsage {
- amount_msat: 1,
- inflight_htlc_msat: 0,
- effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
- };
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2050);
+ usage.inflight_htlc_msat = 0;
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 866);
+
+ let usage = ChannelUsage {
+ amount_msat: 1,
+ inflight_htlc_msat: 0,
+ effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
+ };
+ assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+ }
// Advance to decay all liquidity offsets to zero.
SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
}
struct WrapperLog {
- logger: Arc<Logger>
+ logger: Arc<dyn Logger>
}
impl WrapperLog {
- fn new(logger: Arc<Logger>) -> WrapperLog {
+ fn new(logger: Arc<dyn Logger>) -> WrapperLog {
WrapperLog {
logger,
}
fn test_logging_macros() {
let mut logger = TestLogger::new();
logger.enable(Level::Gossip);
- let logger : Arc<Logger> = Arc::new(logger);
+ let logger : Arc<dyn Logger> = Arc::new(logger);
let wrapper = WrapperLog::new(Arc::clone(&logger));
wrapper.call_macros();
}
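// Editor's note on the `dyn` additions above: bare trait-object syntax like
// `Arc<Logger>` was deprecated in the 2018 edition and is a hard error in
// edition 2021, hence the explicit `dyn`. A minimal standalone version of the
// same pattern (toy trait, not LDK's `Logger`):
use std::sync::Arc;

trait Log {
    fn log(&self, msg: &str);
}

struct StdoutLog;
impl Log for StdoutLog {
    fn log(&self, msg: &str) { println!("{msg}"); }
}

fn main() {
    let logger: Arc<dyn Log> = Arc::new(StdoutLog); // `Arc<Log>` no longer compiles
    let clone = Arc::clone(&logger);
    clone.log("shared trait-object logger");
}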
#[inline]
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self.0 {
- 0...0xFC => {
+ 0..=0xFC => {
(self.0 as u8).write(writer)
},
- 0xFD...0xFFFF => {
+ 0xFD..=0xFFFF => {
0xFDu8.write(writer)?;
(self.0 as u16).write(writer)
},
- 0x10000...0xFFFFFFFF => {
+ 0x10000..=0xFFFFFFFF => {
0xFEu8.write(writer)?;
(self.0 as u32).write(writer)
},
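// Editor's note: the hunk above only migrates the deprecated `...` inclusive
// range-pattern syntax to `..=`; the encoding itself is unchanged. For
// reference, a self-contained sketch of that framing: values up to 0xFC
// inline, then a 0xFD/0xFE/0xFF marker followed by a wider integer. Byte order
// comes from the surrounding `write` impls; this standalone version assumes
// big-endian as in BOLT-style BigSize encoding.
fn encode_bigsize(n: u64) -> Vec<u8> {
    match n {
        0..=0xFC => vec![n as u8],
        0xFD..=0xFFFF => {
            let mut v = vec![0xFD];
            v.extend_from_slice(&(n as u16).to_be_bytes());
            v
        },
        0x10000..=0xFFFF_FFFF => {
            let mut v = vec![0xFE];
            v.extend_from_slice(&(n as u32).to_be_bytes());
            v
        },
        _ => {
            let mut v = vec![0xFF];
            v.extend_from_slice(&n.to_be_bytes());
            v
        },
    }
}

fn main() {
    assert_eq!(encode_bigsize(0xFC), vec![0xFC]);
    assert_eq!(encode_bigsize(0xFD), vec![0xFD, 0x00, 0xFD]);
    assert_eq!(encode_bigsize(0x10000), vec![0xFE, 0x00, 0x01, 0x00, 0x00]);
}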
use crate::ln::script::ShutdownScript;
use crate::offers::invoice::UnsignedBolt12Invoice;
use crate::offers::invoice_request::UnsignedInvoiceRequest;
-use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
+use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
-use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, Router, ScorerAccountingForInFlightHtlcs};
+use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
use crate::sync::RwLock;
use crate::util::config::UserConfig;
#[cfg(feature = "std")]
use std::time::{SystemTime, UNIX_EPOCH};
+use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::Sequence;
pub fn pubkey(byte: u8) -> PublicKey {
let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
for path in &route.paths {
let mut aggregate_msat = 0u64;
+ let mut prev_hop_node = payer;
for (idx, hop) in path.hops.iter().rev().enumerate() {
aggregate_msat += hop.fee_msat;
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Unknown,
};
- // Since the path is reversed, the last element in our iteration is the first
- // hop.
if idx == path.hops.len() - 1 {
- let first_hops = match first_hops {
- Some(hops) => hops,
- None => continue,
- };
- if first_hops.len() == 0 {
- continue;
+ if let Some(first_hops) = first_hops {
+ if let Some(idx) = first_hops.iter().position(|h| h.get_outbound_payment_scid() == Some(hop.short_channel_id)) {
+ let node_id = NodeId::from_pubkey(payer);
+ let candidate = CandidateRouteHop::FirstHop {
+ details: first_hops[idx],
+ payer_node_id: &node_id,
+ };
+ scorer.channel_penalty_msat(&candidate, usage, &());
+ continue;
+ }
}
- let idx = if first_hops.len() > 1 { route.paths.iter().position(|p| p == path).unwrap_or(0) } else { 0 };
- let candidate = CandidateRouteHop::FirstHop {
- details: first_hops[idx],
- node_id: NodeId::from_pubkey(payer)
+ }
+ let network_graph = self.network_graph.read_only();
+ if let Some(channel) = network_graph.channel(hop.short_channel_id) {
+ let (directed, _) = channel.as_directed_to(&NodeId::from_pubkey(&hop.pubkey)).unwrap();
+ let candidate = CandidateRouteHop::PublicHop {
+ info: directed,
+ short_channel_id: hop.short_channel_id,
};
scorer.channel_penalty_msat(&candidate, usage, &());
} else {
- let network_graph = self.network_graph.read_only();
- let channel = match network_graph.channel(hop.short_channel_id) {
- Some(channel) => channel,
- None => continue,
- };
- let channel = match channel.as_directed_to(&NodeId::from_pubkey(&hop.pubkey)) {
- Some(channel) => channel,
- None => panic!("Channel directed to {} was not found", hop.pubkey),
- };
- let candidate = CandidateRouteHop::PublicHop {
- info: channel.0,
+ let target_node_id = NodeId::from_pubkey(&hop.pubkey);
+ let route_hint = RouteHintHop {
+ src_node_id: *prev_hop_node,
short_channel_id: hop.short_channel_id,
+ fees: RoutingFees { base_msat: 0, proportional_millionths: 0 },
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: None,
+ htlc_maximum_msat: None,
+ };
+ let candidate = CandidateRouteHop::PrivateHop {
+ hint: &route_hint,
+ target_node_id: &target_node_id,
};
scorer.channel_penalty_msat(&candidate, usage, &());
}
+ prev_hop_node = &hop.pubkey;
}
}
}
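// Editor's note on the index math in the loop above: the path is iterated in
// reverse, so with `.iter().rev().enumerate()` the *highest* index is the
// path's *first* hop, which is why `idx == path.hops.len() - 1` selects the
// first-hop branch. A tiny self-contained check:
fn main() {
    let hops = ["first", "middle", "last"];
    for (idx, hop) in hops.iter().rev().enumerate() {
        let is_first_hop = idx == hops.len() - 1;
        assert_eq!(is_first_hop, *hop == "first");
    }
}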
pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
- pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<TestChannelSigner>>,
+ pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a dyn chainmonitor::Persist<TestChannelSigner>>,
pub keys_manager: &'a TestKeysInterface,
/// If this is set to Some(), the next update_channel call (not watch_channel) must be a
/// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
pub expect_monitor_round_trip_fail: Mutex<Option<ChannelId>>,
}
impl<'a> TestChainMonitor<'a> {
- pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+ pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
Self {
added_monitors: Mutex::new(Vec::new()),
monitor_updates: Mutex::new(HashMap::new()),
fn channel_penalty_msat(
&self, candidate: &CandidateRouteHop, usage: ChannelUsage, _score_params: &Self::ScoreParams
) -> u64 {
- let short_channel_id = match candidate.short_channel_id() {
+ let short_channel_id = match candidate.globally_unique_short_channel_id() {
Some(scid) => scid,
None => return 0,
};
Ok(ScriptBuf::new_p2pkh(&public_key.pubkey_hash()))
}
- fn sign_tx(&self, mut tx: Transaction) -> Result<Transaction, ()> {
+ fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()> {
+ let mut tx = psbt.extract_tx();
let utxos = self.utxos.borrow();
for i in 0..tx.input.len() {
if let Some(utxo) = utxos.iter().find(|utxo| utxo.outpoint == tx.input[i].previous_output) {
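// Editor's sketch of why the interface moved from `sign_tx` to `sign_psbt`: a
// PSBT can carry the per-input metadata (witness UTXOs, key paths) a signer
// needs, which a bare `Transaction` cannot. A minimal round-trip, assuming the
// rust-bitcoin 0.30 API in use around this change:
use bitcoin::absolute::LockTime;
use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::Transaction;

fn main() {
    let unsigned = Transaction {
        version: 2,
        lock_time: LockTime::ZERO,
        input: vec![],
        output: vec![],
    };
    let psbt = PartiallySignedTransaction::from_unsigned_tx(unsigned).unwrap();
    // A real signer would populate psbt.inputs here before signing.
    let tx = psbt.extract_tx();
    assert!(tx.input.is_empty());
}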
[package]
name = "msrv-check"
version = "0.1.0"
-edition = "2018"
+edition = "2021"
[dependencies]
lightning = { path = "../lightning" }
lightning-persister = { path = "../lightning-persister" }
lightning-background-processor = { path = "../lightning-background-processor", features = ["futures"] }
lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
+lightning-custom-message = { path = "../lightning-custom-message" }
[package]
name = "no-std-check"
version = "0.1.0"
-edition = "2018"
+edition = "2021"
[features]
default = ["lightning/no-std", "lightning-invoice/no-std", "lightning-rapid-gossip-sync/no-std"]