strategy:
fail-fast: false
matrix:
- platform: [ ubuntu-latest ]
- toolchain: [ stable,
- beta,
- # 1.41.1 is MSRV for Rust-Lightning, lightning-invoice, and lightning-persister
- 1.41.1,
- # 1.45.2 is MSRV for lightning-net-tokio, lightning-block-sync, lightning-background-processor, and coverage generation
- 1.45.2,
- # 1.47.0 will be the MSRV for no-std builds using hashbrown once core2 is updated
- 1.47.0]
+ platform: [ ubuntu-latest, windows-latest, macos-latest ]
+ toolchain: [ stable, beta ]
include:
- toolchain: stable
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- - toolchain: stable
- platform: macos-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- - toolchain: beta
- platform: macos-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- - toolchain: stable
- platform: windows-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- - toolchain: beta
- platform: windows-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- - toolchain: beta
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- - toolchain: 1.41.1
- build-no-std: false
- test-log-variants: true
- build-futures: false
- - toolchain: 1.45.2
- build-net-old-tokio: true
- build-net-tokio: true
- build-no-std: false
- build-futures: true
+ platform: ubuntu-latest
coverage: true
- - toolchain: 1.47.0
- build-futures: true
- build-no-std: true
+ # 1.48.0 is the MSRV for all crates except lightning-transaction-sync and Win/Mac
+ - toolchain: 1.48.0
+ platform: ubuntu-latest
+ # Windows requires 1.49.0 because that's the MSRV for supported Tokio
+ - toolchain: 1.49.0
+ platform: windows-latest
+ # MacOS-latest requires 1.54.0 because that's what's required for linking to work properly
+ - toolchain: 1.54.0
+ platform: macos-latest
runs-on: ${{ matrix.platform }}
steps:
- name: Checkout source code
toolchain: ${{ matrix.toolchain }}
override: true
profile: minimal
- - name: Pin tokio to 1.14 for Rust 1.45
- if: "matrix.build-net-old-tokio"
- run: cargo update -p tokio --precise "1.14.0" --verbose
- env:
- CARGO_NET_GIT_FETCH_WITH_CLI: "true"
- - name: Build on Rust ${{ matrix.toolchain }} with net-tokio
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: cargo build --verbose --color always
- - name: Build on Rust ${{ matrix.toolchain }} with net-tokio and full code-linking for coverage generation
- if: matrix.coverage
- run: RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always
- - name: Build on Rust ${{ matrix.toolchain }}
- if: "! matrix.build-net-tokio"
- run: |
- cargo build --verbose --color always -p lightning
- cargo build --verbose --color always -p lightning-invoice
- cargo build --verbose --color always -p lightning-persister
- - name: Build on Rust ${{ matrix.toolchain }} with all Log-Limiting features
- if: matrix.test-log-variants
- run: |
- cd lightning
- for FEATURE in $(cat Cargo.toml | grep '^max_level_' | awk '{ print $1 }'); do
- cargo build --verbose --color always --features $FEATURE
- done
- - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: |
- cd lightning-block-sync
- cargo build --verbose --color always --features rest-client
- cargo build --verbose --color always --features rpc-client
- cargo build --verbose --color always --features rpc-client,rest-client
- cargo build --verbose --color always --features rpc-client,rest-client,tokio
- - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
- if: matrix.coverage
+ - name: Install no-std-check dependencies for ARM Embedded
+ if: "matrix.platform == 'ubuntu-latest'"
run: |
- cd lightning-block-sync
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rest-client
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client,tokio
- - name: Test backtrace-debug builds on Rust ${{ matrix.toolchain }}
- if: "matrix.toolchain == 'stable'"
- run: |
- cd lightning && cargo test --verbose --color always --features backtrace
- - name: Test on Rust ${{ matrix.toolchain }} with net-tokio
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: cargo test --verbose --color always
- - name: Test on Rust ${{ matrix.toolchain }} with net-tokio and full code-linking for coverage generation
- if: matrix.coverage
- run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always
- - name: Test no-std builds on Rust ${{ matrix.toolchain }}
- if: "matrix.build-no-std && !matrix.coverage"
- shell: bash # Default on Winblows is powershell
- run: |
- for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
- cd $DIR
- cargo test --verbose --color always --no-default-features --features no-std
- # check if there is a conflict between no-std and the default std feature
- cargo test --verbose --color always --features no-std
- # check that things still pass without grind_signatures
- # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
- cargo test --verbose --color always --no-default-features --features std
- # check if there is a conflict between no-std and the c_bindings cfg
- RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
- cd ..
- done
- # check no-std compatibility across dependencies
- cd no-std-check
- cargo check --verbose --color always
- - name: Build no-std-check on Rust ${{ matrix.toolchain }} for ARM Embedded
- if: "matrix.build-no-std && matrix.platform == 'ubuntu-latest'"
- run: |
- cd no-std-check
rustup target add thumbv7m-none-eabi
sudo apt-get -y install gcc-arm-none-eabi
- cargo build --target=thumbv7m-none-eabi
- - name: Test on no-std builds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
- if: "matrix.build-no-std && matrix.coverage"
+ - name: shellcheck the CI script
+ if: "matrix.platform == 'ubuntu-latest'"
run: |
- cd lightning
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no-std
- - name: Test futures builds on Rust ${{ matrix.toolchain }}
- if: "matrix.build-futures && !matrix.coverage"
+ sudo apt-get -y install shellcheck
+ shellcheck ci/ci-tests.sh
+ - name: Run CI script with coverage generation
+ if: matrix.coverage
shell: bash # Default on Winblows is powershell
- run: |
- cd lightning-background-processor
- cargo test --verbose --color always --no-default-features --features futures
- - name: Test futures builds on Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
- if: "matrix.build-futures && matrix.coverage"
+ run: LDK_COVERAGE_BUILD=true ./ci/ci-tests.sh
+ - name: Run CI script
+ if: "!matrix.coverage"
shell: bash # Default on Winblows is powershell
- run: |
- cd lightning-background-processor
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features futures
- - name: Test on Rust ${{ matrix.toolchain }}
- if: "! matrix.build-net-tokio"
- run: |
- cargo test --verbose --color always -p lightning
- cargo test --verbose --color always -p lightning-invoice
- cargo test --verbose --color always -p lightning-rapid-gossip-sync
- cargo test --verbose --color always -p lightning-persister
- cargo test --verbose --color always -p lightning-background-processor
- - name: Test C Bindings Modifications on Rust ${{ matrix.toolchain }}
- if: "! matrix.build-net-tokio"
- run: |
- RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always -p lightning
- RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always -p lightning-invoice
- RUSTFLAGS="--cfg=c_bindings" cargo build --verbose --color always -p lightning-persister
- RUSTFLAGS="--cfg=c_bindings" cargo build --verbose --color always -p lightning-background-processor
- - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: |
- cd lightning-block-sync
- cargo test --verbose --color always --features rest-client
- cargo test --verbose --color always --features rpc-client
- cargo test --verbose --color always --features rpc-client,rest-client
- cargo test --verbose --color always --features rpc-client,rest-client,tokio
- - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
- if: matrix.coverage
- run: |
- cd lightning-block-sync
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rest-client
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client,rest-client
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client,rest-client,tokio
+ run: ./ci/ci-tests.sh
- name: Install deps for kcov
if: matrix.coverage
run: |
linting:
runs-on: ubuntu-latest
env:
- TOOLCHAIN: 1.47.0
+ TOOLCHAIN: stable
steps:
- name: Checkout source code
uses: actions/checkout@v3
.idea
lightning/target
lightning/ldk-net_graph-*.bin
+lightning-custom-message/target
no-std-check/target
-
--------------- / (as EventsProvider) ^ | |
| PeerManager |- \ | | |
--------------- \ | (is-a) | |
- | ----------------- \ _---------------- / /
- | | chain::Access | \ / | ChainMonitor |---------------
- | ----------------- \ / ----------------
+ | -------------- \ _---------------- / /
+ | | UtxoLookup | \ / | ChainMonitor |---------------
+ | -------------- \ / ----------------
| ^ \ / |
(as RoutingMessageHandler) | v v
\ ----------------- --------- -----------------
+# 0.0.114 - Mar 3, 2023 - "Faster Async BOLT12 Retries"
+
+## API Updates
+ * `InvoicePayer` has been removed and its features moved directly into
+ `ChannelManager`. As such it now requires a simplified `Router` and supports
+ `send_payment_with_retry` (and friends). `ChannelManager::retry_payment` was
+ removed in favor of the automated retries. Invoice payment utilities in
+ `lightning-invoice` now call the new code (#1812, #1916, #1929, #2007, etc).
+ * `Sign`/`BaseSign` has been renamed `ChannelSigner`, with `EcdsaChannelSigner`
+ split out in anticipation of future schnorr/taproot support (#1967).
+ * The catch-all `KeysInterface` was split into `EntropySource`, `NodeSigner`,
+ and `SignerProvider`. `KeysManager` implements all three (#1910, #1930).
+ * `KeysInterface::get_node_secret` is now `KeysManager::get_node_secret_key`
+ and is no longer required for external signers (#1951, #2070).
+ * A `lightning-transaction-sync` crate has been added which implements keeping
+ LDK in sync with the chain via an esplora server (#1870). Note that it can
+ only be used on nodes that *never* ran a previous version of LDK.
+ * `Score` is updated in `BackgroundProcessor` instead of via `Router` (#1996).
+ * `ChainAccess::get_utxo` (now `UtxoLookup`) can now be resolved async (#1980).
+ * BOLT12 `Offer`, `InvoiceRequest`, `Invoice` and `Refund` structs as well as
+ associated builders have been added. Such invoices cannot yet be paid due to
+ missing support for blinded path payments (#1927, #1908, #1926).
+ * A `lightning-custom-message` crate has been added to make combining multiple
+ custom messages into one enum/handler easier (#1832).
+ * `Event::PaymentPathFailed` is now generated for failure to send an HTLC
+ over the first hop on our local channel (#2014, #2043).
+ * `lightning-net-tokio` no longer requires an `Arc` on `PeerManager` (#1968).
+ * `ChannelManager::list_recent_payments` was added (#1873).
+ * `lightning-background-processor` `std` is now optional in async mode (#1962).
+ * `create_phantom_invoice` can now be used in `no-std` (#1985).
+ * The required final CLTV delta on inbound payments is now configurable (#1878)
+ * bitcoind RPC error code and message are now surfaced in `block-sync` (#2057).
+ * Get `historical_estimated_channel_liquidity_probabilities` was added (#1961).
+ * `ChannelManager::fail_htlc_backwards_with_reason` was added (#1948).
+ * Macros which implement serialization using TLVs or straight writing of struct
+ fields are now public (#1823, #1976, #1977).
+
+## Backwards Compatibility
+ * Any inbound payments with a custom final CLTV delta will be rejected by LDK
+ if you downgrade prior to receipt (#1878).
+ * `Event::PaymentPathFailed::network_update` will always be `None` if an
+ 0.0.114-generated event is read by a prior version of LDK (#2043).
+ * `Event::PaymentPathFailed::all_paths_failed` will always be false if an
+ 0.0.114-generated event is read by a prior version of LDK. Users who rely on
+ it to determine payment retries should migrate to `Event::PaymentFailed`, in
+ a separate release prior to upgrading to LDK 0.0.114 if downgrading is
+ supported (#2043).
+
+## Performance Improvements
+ * Channel data is now stored per-peer and channel updates across multiple
+ peers can be operated on simultaneously (#1507).
+ * Routefinding is roughly 1.5x faster (#1799).
+ * Deserializing a `NetworkGraph` is roughly 6x faster (#2016).
+ * Memory usage for a `NetworkGraph` has been reduced substantially (#2040).
+ * `KeysInterface::get_secure_random_bytes` is roughly 200x faster (#1974).
+
+## Bug Fixes
+ * Fixed a bug where a delay in processing a `PaymentSent` event longer than the
+ time taken to persist a `ChannelMonitor` update, when occurring immediately
+ prior to a crash, may result in the `PaymentSent` event being lost (#2048).
+ * Fixed spurious rejections of rapid gossip sync data when the graph has been
+ updated by other means between gossip syncs (#2046).
+ * Fixed a panic in `KeysManager` when the high bit of `starting_time_nanos`
+ is set (#1935).
+ * Resolved an issue where the `ChannelManager::get_persistable_update_future`
+ future would fail to wake until a second notification occurs (#2064).
+ * Resolved a memory leak when using `ChannelManager::send_probe` (#2037).
+ * Fixed a deadlock on some platforms at least when using async `ChannelMonitor`
+ updating (#2006).
+ * Removed debug-only assertions which were reachable in threaded code (#1964).
+ * In some cases when payment sending fails on our local channel retries no
+ longer take the same path and thus never succeed (#2014).
+ * Retries for spontaneous payments have been fixed (#2002).
+ * Return an `Err` if `lightning-persister` fails to read the directory listing
+ rather than panicking (#1943).
+ * `peer_disconnected` will now never be called without `peer_connected` (#2035)
+
+## Security
+0.0.114 fixes several denial-of-service vulnerabilities which are reachable from
+untrusted input from channel counterparties or in deployments accepting inbound
+connections or channels. It also fixes a denial-of-service vulnerability in rare
+cases in the route finding logic.
+ * The number of pending un-funded channels as well as peers without funded
+ channels is now limited to avoid denial of service (#1988).
+ * A second `channel_ready` message received immediately after the first could
+ lead to a spurious panic (#2071). This issue was introduced with 0conf
+ support in LDK 0.0.107.
+ * A division-by-zero issue was fixed in the `ProbabilisticScorer` if the amount
+ being sent (including previous-hop fees) is equal to a channel's capacity
+ while walking the graph (#2072). The division-by-zero was introduced with
+ historical data tracking in LDK 0.0.112.
+
+In total, this release features 130 files changed, 21457 insertions, 10113
+deletions in 343 commits from 18 authors, in alphabetical order:
+ * Alec Chen
+ * Allan Douglas R. de Oliveira
+ * Andrei
+ * Arik Sosman
+ * Daniel Granhão
+ * Duncan Dean
+ * Elias Rohrer
+ * Jeffrey Czyz
+ * John Cantrell
+ * Kurtsley
+ * Matt Corallo
+ * Max Fang
+ * Omer Yacine
+ * Valentine Wallace
+ * Viktor Tigerström
+ * Wilmer Paulino
+ * benthecarman
+ * jurvis
+
+
# 0.0.113 - Dec 16, 2022 - "Big Movement Intercepted"
## API Updates
]
exclude = [
+ "lightning-custom-message",
+ "lightning-transaction-sync",
"no-std-check",
]
-# Our tests do actual crypto and lots of work, the tradeoff for -O1 is well worth it.
-# Ideally we would only do this in profile.test, but profile.test only applies to
-# the test binary, not dependencies, which means most of the critical code still
-# gets compiled as -O0. See
+# Our tests do actual crypto and lots of work, the tradeoff for -O2 is well
+# worth it. Note that we only apply optimizations to dependencies, not workspace
+# crates themselves.
# https://doc.rust-lang.org/cargo/reference/profiles.html#profile-selection
+[profile.dev.package."*"]
+opt-level = 2
+
+# It appears some minimal optimizations are required to inline many std methods
+# and reduce the otherwise-substantial time spent in std self-checks. We do so
+# here but ensure we keep LTO disabled as otherwise we're re-optimizing all our
+# dependencies every time we make any local changes.
[profile.dev]
opt-level = 1
-panic = "abort"
+lto = "off"
[profile.release]
opt-level = 3
* 07DF3E57A548CCFB7530709189BBB8663E2E65CE (Matt Corallo)
* 5DBC576CCCF546CA72AB06CE912EF12EA67705F5 (Jeffrey Czyz)
- * 729E9D9D92C75A5FBFEEE057B5DD717BEF7CA5B1 (Wilmer Paulino)
+ * 0A156842CF60B58BD826ABDD808FC696767C6147 (Wilmer Paulino)
* BD6EED4D339EDBF7E7CE7F8836153082BDF676FD (Elias Rohrer)
* 6E0287D8849AE741E47CC586FD3E106A2CE099B4 (Valentine Wallace)
* 69CFEA635D0E6E6F13FD9D9136D932FCAC0305F0 (Arik Sosman)
--- /dev/null
+#!/bin/bash
+#
+# CI entry point for the rust-lightning workspace: builds and tests every
+# crate in the feature combinations CI cares about. Steps that need a newer
+# toolchain or a specific platform are gated on the values detected below.
+set -eox pipefail
+
+# Minor version of the active rustc (e.g. 48 for 1.48.0) and the host triple,
+# used to gate version- and platform-sensitive steps.
+RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }')
+HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')"
+
+# Tokio MSRV on versions newer than 1.14 is rustc 1.49
+# NOTE(review): `[ cond ] && cmd` guards are safe under `set -e` -- only the
+# command after the final `&&` of an AND-OR list can abort the script.
+[ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.0" --verbose
+# The workflow exports LDK_COVERAGE_BUILD when kcov coverage is generated;
+# keeping dead code linked avoids under-counting coverage.
+[ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
+
+export RUST_BACKTRACE=1
+
+echo -e "\n\nBuilding and testing all workspace crates..."
+cargo build --verbose --color always
+cargo test --verbose --color always
+
+echo -e "\n\nBuilding with all Log-Limiting features"
+pushd lightning
+grep '^max_level_' Cargo.toml | awk '{ print $1 }'| while read -r FEATURE; do
+ cargo build --verbose --color always --features "$FEATURE"
+done
+popd
+
+if [ "$RUSTC_MINOR_VERSION" -gt 51 ]; then # Current `object` MSRV, subject to change
+ echo -e "\n\nTest backtrace-debug builds"
+ pushd lightning
+ cargo test --verbose --color always --features backtrace
+ popd
+fi
+
+echo -e "\n\nTesting no-std flags in various combinations"
+for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
+ pushd $DIR
+ cargo test --verbose --color always --no-default-features --features no-std
+ # check if there is a conflict between no-std and the default std feature
+ cargo test --verbose --color always --features no-std
+ # check that things still pass without grind_signatures
+ # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
+ cargo test --verbose --color always --no-default-features --features std
+ # check if there is a conflict between no-std and the c_bindings cfg
+ RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
+ popd
+done
+
+echo -e "\n\nTesting no-std build on a downstream no-std crate"
+# check no-std compatibility across dependencies
+pushd no-std-check
+cargo check --verbose --color always --features lightning-transaction-sync
+popd
+
+# arm-none-eabi-gcc is only installed by the ubuntu workflow step; when `which`
+# finds nothing, `[ -f "" ]` is false and the embedded build is skipped.
+if [ -f "$(which arm-none-eabi-gcc)" ]; then
+ pushd no-std-check
+ cargo build --target=thumbv7m-none-eabi
+ popd
+fi
+
+echo -e "\n\nBuilding and testing Block Sync Clients with features"
+pushd lightning-block-sync
+cargo build --verbose --color always --features rest-client
+cargo test --verbose --color always --features rest-client
+cargo build --verbose --color always --features rpc-client
+cargo test --verbose --color always --features rpc-client
+cargo build --verbose --color always --features rpc-client,rest-client
+cargo test --verbose --color always --features rpc-client,rest-client
+cargo build --verbose --color always --features rpc-client,rest-client,tokio
+cargo test --verbose --color always --features rpc-client,rest-client,tokio
+popd
+
+# lightning-transaction-sync needs rustc > 1.67 and is not built on Windows
+# (it is also excluded from the workspace -- see Cargo.toml's `exclude` list).
+if [[ $RUSTC_MINOR_VERSION -gt 67 && "$HOST_PLATFORM" != *windows* ]]; then
+ echo -e "\n\nBuilding and testing Transaction Sync Clients with features"
+ pushd lightning-transaction-sync
+ cargo build --verbose --color always --features esplora-blocking
+ cargo test --verbose --color always --features esplora-blocking
+ cargo build --verbose --color always --features esplora-async
+ cargo test --verbose --color always --features esplora-async
+ cargo build --verbose --color always --features esplora-async-https
+ cargo test --verbose --color always --features esplora-async-https
+ popd
+fi
+
+echo -e "\n\nTest futures builds"
+pushd lightning-background-processor
+cargo test --verbose --color always --no-default-features --features futures
+popd
+
+# lightning-custom-message requires rustc > 1.55 (hence this guard and its
+# exclusion from the workspace).
+if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then
+ echo -e "\n\nTest Custom Message Macros"
+ pushd lightning-custom-message
+ cargo test --verbose --color always
+ popd
+fi
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Fuzz target exercising the `Bech32Encode` parser from
+//! `lightning::offers::parse` via a byte-vector round-trip.
+
+use crate::utils::test_logger;
+use core::convert::TryFrom;
+use lightning::offers::parse::{Bech32Encode, ParseError};
+
+/// If `data` is valid UTF-8 and parses as bech32 under the `lno` HRP,
+/// re-encodes the decoded bytes and asserts that re-parsing the encoding
+/// yields the same bytes (encode/decode round-trip consistency).
+#[inline]
+pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+ if let Ok(bech32_encoded) = std::str::from_utf8(data) {
+ if let Ok(bytes) = Bytes::from_bech32_str(bech32_encoded) {
+ let bech32_encoded = bytes.to_string();
+ assert_eq!(bytes, Bytes::from_bech32_str(&bech32_encoded).unwrap());
+ }
+ }
+}
+
+// Minimal byte-vector newtype used as the `Bech32Encode` payload under test.
+#[derive(Debug, PartialEq)]
+struct Bytes(Vec<u8>);
+
+impl Bech32Encode for Bytes {
+ // Human-readable part used for the encoding under test.
+ const BECH32_HRP: &'static str = "lno";
+}
+
+impl AsRef<[u8]> for Bytes {
+ fn as_ref(&self) -> &[u8] {
+ &self.0
+ }
+}
+
+impl TryFrom<Vec<u8>> for Bytes {
+ type Error = ParseError;
+ // Conversion from a plain byte vector can never fail.
+ fn try_from(data: Vec<u8>) -> Result<Self, ParseError> {
+ Ok(Bytes(data))
+ }
+}
+
+impl core::fmt::Display for Bytes {
+ // Renders the bytes bech32-encoded, via the `Bech32Encode` helper.
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
+ self.fmt_bech32_str(f)
+ }
+}
+
+/// Entry point used by the generated fuzz targets' in-tree test harness.
+pub fn bech32_parse_test<Out: test_logger::Output>(data: &[u8], out: Out) {
+ do_test(data, out);
+}
+
+/// C-ABI entry point for external fuzz drivers (AFL, honggfuzz, libFuzzer).
+///
+/// Safety: `data` must point to `datalen` readable bytes.
+#[no_mangle]
+pub extern "C" fn bech32_parse_run(data: *const u8, datalen: usize) {
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {});
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::bech32_parse::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ bech32_parse_run(data.as_ptr(), data.len());
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ bech32_parse_run(data.as_ptr(), data.len());
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ bech32_parse_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+ use std::io::Read;
+
+ let mut data = Vec::with_capacity(8192);
+ std::io::stdin().read_to_end(&mut data).unwrap();
+ bech32_parse_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+ use std::fs;
+ use std::io::Read;
+ use lightning_fuzz::utils::test_logger::StringBuffer;
+
+ use std::sync::{atomic, Arc};
+ {
+ let data: Vec<u8> = vec![0];
+ bech32_parse_run(data.as_ptr(), data.len());
+ }
+ let mut threads = Vec::new();
+ let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+ if let Ok(tests) = fs::read_dir("test_cases/bech32_parse") {
+ for test in tests {
+ let mut data: Vec<u8> = Vec::new();
+ let path = test.unwrap().path();
+ fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+ threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+ let thread_count_ref = Arc::clone(&threads_running);
+ let main_thread_ref = std::thread::current();
+ threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+ std::thread::spawn(move || {
+ let string_logger = StringBuffer::new();
+
+ let panic_logger = string_logger.clone();
+ let res = if ::std::panic::catch_unwind(move || {
+ bech32_parse_test(&data, panic_logger);
+ }).is_err() {
+ Some(string_logger.into_string())
+ } else { None };
+ thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+ main_thread_ref.unpark();
+ res
+ })
+ ));
+ while threads_running.load(atomic::Ordering::Acquire) > 32 {
+ std::thread::park();
+ }
+ }
+ }
+ let mut failed_outputs = Vec::new();
+ for (test, thread) in threads.drain(..) {
+ if let Some(output) = thread.join().unwrap() {
+ println!("\nOutput of {}:\n{}\n", test, output);
+ failed_outputs.push(test);
+ }
+ }
+ if !failed_outputs.is_empty() {
+ println!("Test cases which failed: ");
+ for case in failed_outputs {
+ println!("{}", case);
+ }
+ panic!();
+ }
+}
echo "void $1_run(const unsigned char* data, size_t data_len);" >> ../../targets.h
}
+GEN_TEST bech32_parse
GEN_TEST chanmon_deser
GEN_TEST chanmon_consistency
GEN_TEST full_stack
+GEN_TEST invoice_deser
+GEN_TEST invoice_request_deser
+GEN_TEST offer_deser
GEN_TEST onion_message
GEN_TEST peer_crypt
GEN_TEST process_network_graph
+GEN_TEST refund_deser
GEN_TEST router
GEN_TEST zbase32
GEN_TEST indexedmap
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::invoice_deser::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ invoice_deser_run(data.as_ptr(), data.len());
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ invoice_deser_run(data.as_ptr(), data.len());
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ invoice_deser_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+ use std::io::Read;
+
+ let mut data = Vec::with_capacity(8192);
+ std::io::stdin().read_to_end(&mut data).unwrap();
+ invoice_deser_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+ use std::fs;
+ use std::io::Read;
+ use lightning_fuzz::utils::test_logger::StringBuffer;
+
+ use std::sync::{atomic, Arc};
+ {
+ let data: Vec<u8> = vec![0];
+ invoice_deser_run(data.as_ptr(), data.len());
+ }
+ let mut threads = Vec::new();
+ let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+ if let Ok(tests) = fs::read_dir("test_cases/invoice_deser") {
+ for test in tests {
+ let mut data: Vec<u8> = Vec::new();
+ let path = test.unwrap().path();
+ fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+ threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+ let thread_count_ref = Arc::clone(&threads_running);
+ let main_thread_ref = std::thread::current();
+ threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+ std::thread::spawn(move || {
+ let string_logger = StringBuffer::new();
+
+ let panic_logger = string_logger.clone();
+ let res = if ::std::panic::catch_unwind(move || {
+ invoice_deser_test(&data, panic_logger);
+ }).is_err() {
+ Some(string_logger.into_string())
+ } else { None };
+ thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+ main_thread_ref.unpark();
+ res
+ })
+ ));
+ while threads_running.load(atomic::Ordering::Acquire) > 32 {
+ std::thread::park();
+ }
+ }
+ }
+ let mut failed_outputs = Vec::new();
+ for (test, thread) in threads.drain(..) {
+ if let Some(output) = thread.join().unwrap() {
+ println!("\nOutput of {}:\n{}\n", test, output);
+ failed_outputs.push(test);
+ }
+ }
+ if !failed_outputs.is_empty() {
+ println!("Test cases which failed: ");
+ for case in failed_outputs {
+ println!("{}", case);
+ }
+ panic!();
+ }
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::invoice_request_deser::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ invoice_request_deser_run(data.as_ptr(), data.len());
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ invoice_request_deser_run(data.as_ptr(), data.len());
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ invoice_request_deser_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+ use std::io::Read;
+
+ let mut data = Vec::with_capacity(8192);
+ std::io::stdin().read_to_end(&mut data).unwrap();
+ invoice_request_deser_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+ use std::fs;
+ use std::io::Read;
+ use lightning_fuzz::utils::test_logger::StringBuffer;
+
+ use std::sync::{atomic, Arc};
+ {
+ let data: Vec<u8> = vec![0];
+ invoice_request_deser_run(data.as_ptr(), data.len());
+ }
+ let mut threads = Vec::new();
+ let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+ if let Ok(tests) = fs::read_dir("test_cases/invoice_request_deser") {
+ for test in tests {
+ let mut data: Vec<u8> = Vec::new();
+ let path = test.unwrap().path();
+ fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+ threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+ let thread_count_ref = Arc::clone(&threads_running);
+ let main_thread_ref = std::thread::current();
+ threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+ std::thread::spawn(move || {
+ let string_logger = StringBuffer::new();
+
+ let panic_logger = string_logger.clone();
+ let res = if ::std::panic::catch_unwind(move || {
+ invoice_request_deser_test(&data, panic_logger);
+ }).is_err() {
+ Some(string_logger.into_string())
+ } else { None };
+ thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+ main_thread_ref.unpark();
+ res
+ })
+ ));
+ while threads_running.load(atomic::Ordering::Acquire) > 32 {
+ std::thread::park();
+ }
+ }
+ }
+ let mut failed_outputs = Vec::new();
+ for (test, thread) in threads.drain(..) {
+ if let Some(output) = thread.join().unwrap() {
+ println!("\nOutput of {}:\n{}\n", test, output);
+ failed_outputs.push(test);
+ }
+ }
+ if !failed_outputs.is_empty() {
+ println!("Test cases which failed: ");
+ for case in failed_outputs {
+ println!("{}", case);
+ }
+ panic!();
+ }
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::offer_deser::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ offer_deser_run(data.as_ptr(), data.len());
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ offer_deser_run(data.as_ptr(), data.len());
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ offer_deser_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+ use std::io::Read;
+
+ let mut data = Vec::with_capacity(8192);
+ std::io::stdin().read_to_end(&mut data).unwrap();
+ offer_deser_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+ use std::fs;
+ use std::io::Read;
+ use lightning_fuzz::utils::test_logger::StringBuffer;
+
+ use std::sync::{atomic, Arc};
+ {
+ let data: Vec<u8> = vec![0];
+ offer_deser_run(data.as_ptr(), data.len());
+ }
+ let mut threads = Vec::new();
+ let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+ if let Ok(tests) = fs::read_dir("test_cases/offer_deser") {
+ for test in tests {
+ let mut data: Vec<u8> = Vec::new();
+ let path = test.unwrap().path();
+ fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+ threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+ let thread_count_ref = Arc::clone(&threads_running);
+ let main_thread_ref = std::thread::current();
+ threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+ std::thread::spawn(move || {
+ let string_logger = StringBuffer::new();
+
+ let panic_logger = string_logger.clone();
+ let res = if ::std::panic::catch_unwind(move || {
+ offer_deser_test(&data, panic_logger);
+ }).is_err() {
+ Some(string_logger.into_string())
+ } else { None };
+ thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+ main_thread_ref.unpark();
+ res
+ })
+ ));
+ while threads_running.load(atomic::Ordering::Acquire) > 32 {
+ std::thread::park();
+ }
+ }
+ }
+ let mut failed_outputs = Vec::new();
+ for (test, thread) in threads.drain(..) {
+ if let Some(output) = thread.join().unwrap() {
+ println!("\nOutput of {}:\n{}\n", test, output);
+ failed_outputs.push(test);
+ }
+ }
+ if !failed_outputs.is_empty() {
+ println!("Test cases which failed: ");
+ for case in failed_outputs {
+ println!("{}", case);
+ }
+ panic!();
+ }
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::refund_deser::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ refund_deser_run(data.as_ptr(), data.len());
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ refund_deser_run(data.as_ptr(), data.len());
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ refund_deser_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+ use std::io::Read;
+
+ let mut data = Vec::with_capacity(8192);
+ std::io::stdin().read_to_end(&mut data).unwrap();
+ refund_deser_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+ use std::fs;
+ use std::io::Read;
+ use lightning_fuzz::utils::test_logger::StringBuffer;
+
+ use std::sync::{atomic, Arc};
+ {
+ let data: Vec<u8> = vec![0];
+ refund_deser_run(data.as_ptr(), data.len());
+ }
+ let mut threads = Vec::new();
+ let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+ if let Ok(tests) = fs::read_dir("test_cases/refund_deser") {
+ for test in tests {
+ let mut data: Vec<u8> = Vec::new();
+ let path = test.unwrap().path();
+ fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+ threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+ let thread_count_ref = Arc::clone(&threads_running);
+ let main_thread_ref = std::thread::current();
+ threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+ std::thread::spawn(move || {
+ let string_logger = StringBuffer::new();
+
+ let panic_logger = string_logger.clone();
+ let res = if ::std::panic::catch_unwind(move || {
+ refund_deser_test(&data, panic_logger);
+ }).is_err() {
+ Some(string_logger.into_string())
+ } else { None };
+ thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+ main_thread_ref.unpark();
+ res
+ })
+ ));
+ while threads_running.load(atomic::Ordering::Acquire) > 32 {
+ std::thread::park();
+ }
+ }
+ }
+ let mut failed_outputs = Vec::new();
+ for (test, thread) in threads.drain(..) {
+ if let Some(output) = thread.join().unwrap() {
+ println!("\nOutput of {}:\n{}\n", test, output);
+ failed_outputs.push(test);
+ }
+ }
+ if !failed_outputs.is_empty() {
+ println!("Test cases which failed: ");
+ for case in failed_outputs {
+ println!("{}", case);
+ }
+ panic!();
+ }
+}
action: msgs::ErrorAction::IgnoreError
})
}
- fn notify_payment_path_failed(&self, _path: &[&RouteHop], _short_channel_id: u64) {}
- fn notify_payment_path_successful(&self, _path: &[&RouteHop]) {}
- fn notify_payment_probe_successful(&self, _path: &[&RouteHop]) {}
- fn notify_payment_probe_failed(&self, _path: &[&RouteHop], _short_channel_id: u64) {}
}
pub struct TestBroadcaster {}
// all others. If you hit this panic, the list of acceptable errors
// is probably just stale and you should add new messages here.
match err.as_str() {
- "Peer for first hop currently disconnected/pending monitor update!" => {},
+ "Peer for first hop currently disconnected" => {},
_ if err.starts_with("Cannot push more than their max accepted HTLCs ") => {},
_ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept ") => {},
_ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
let network = Network::Bitcoin;
let params = ChainParameters {
network,
- best_block: BestBlock::from_genesis(network),
+ best_block: BestBlock::from_network(network),
};
(ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), &router, Arc::clone(&logger), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), config, params),
monitor, keys_manager)
let mut channel_txn = Vec::new();
macro_rules! make_channel {
($source: expr, $dest: expr, $chan_id: expr) => { {
- $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }).unwrap();
- $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }).unwrap();
+ $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }, true).unwrap();
+ $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }, false).unwrap();
$source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
let open_channel = {
events::Event::PaymentClaimed { .. } => {},
events::Event::PaymentPathSuccessful { .. } => {},
events::Event::PaymentPathFailed { .. } => {},
+ events::Event::PaymentFailed { .. } => {},
events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. } => {
// Even though we don't explicitly send probes, because probes are
// detected based on hashing the payment hash+preimage, its rather
0x0c => {
if !chan_a_disconnected {
- nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
- nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+ nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
+ nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
chan_a_disconnected = true;
drain_msg_events_on_disconnect!(0);
}
},
0x0d => {
if !chan_b_disconnected {
- nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
- nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+ nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
+ nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
chan_b_disconnected = true;
drain_msg_events_on_disconnect!(2);
}
},
0x0e => {
if chan_a_disconnected {
- nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
- nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap();
+ nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
chan_a_disconnected = false;
}
},
0x0f => {
if chan_b_disconnected {
- nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap();
- nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
+ nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
+ nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
chan_b_disconnected = false;
}
},
0x2c => {
if !chan_a_disconnected {
- nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+ nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
chan_a_disconnected = true;
drain_msg_events_on_disconnect!(0);
}
},
0x2d => {
if !chan_a_disconnected {
- nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
+ nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
chan_a_disconnected = true;
nodes[0].get_and_clear_pending_msg_events();
ab_events.clear();
ba_events.clear();
}
if !chan_b_disconnected {
- nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+ nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
chan_b_disconnected = true;
nodes[2].get_and_clear_pending_msg_events();
bc_events.clear();
},
0x2e => {
if !chan_b_disconnected {
- nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
+ nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
chan_b_disconnected = true;
drain_msg_events_on_disconnect!(2);
}
// Next, make sure peers are all connected to each other
if chan_a_disconnected {
- nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
- nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap();
+ nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
chan_a_disconnected = false;
}
if chan_b_disconnected {
- nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap();
- nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
+ nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
+ nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
chan_b_disconnected = false;
}
use lightning::ln::msgs::{self, DecodeError};
use lightning::ln::script::ShutdownScript;
use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
-use lightning::routing::router::{find_route, InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, Router};
+use lightning::routing::utxo::UtxoLookup;
+use lightning::routing::router::{find_route, InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
use lightning::routing::scoring::FixedPenaltyScorer;
use lightning::util::config::UserConfig;
use lightning::util::errors::APIError;
action: msgs::ErrorAction::IgnoreError
})
}
- fn notify_payment_path_failed(&self, _path: &[&RouteHop], _short_channel_id: u64) {}
- fn notify_payment_path_successful(&self, _path: &[&RouteHop]) {}
- fn notify_payment_probe_successful(&self, _path: &[&RouteHop]) {}
- fn notify_payment_probe_failed(&self, _path: &[&RouteHop], _short_channel_id: u64) {}
}
struct TestBroadcaster {
type ChannelMan<'a> = ChannelManager<
Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<FuzzEstimator>, &'a FuzzRouter, Arc<dyn Logger>>;
-type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan<'a>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn chain::Access>, Arc<dyn Logger>>>, IgnoringMessageHandler, Arc<dyn Logger>, IgnoringMessageHandler, Arc<KeyProvider>>;
+type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan<'a>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn UtxoLookup>, Arc<dyn Logger>>>, IgnoringMessageHandler, Arc<dyn Logger>, IgnoringMessageHandler, Arc<KeyProvider>>;
struct MoneyLossDetector<'a> {
manager: Arc<ChannelMan<'a>>,
let network = Network::Bitcoin;
let params = ChainParameters {
network,
- best_block: BestBlock::from_genesis(network),
+ best_block: BestBlock::from_network(network),
};
let channelmanager = Arc::new(ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), &router, Arc::clone(&logger), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), config, params));
// Adding new calls to `EntropySource::get_secure_random_bytes` during startup can change all the
// it's easier to just increment the counter here so the keys don't change.
keys_manager.counter.fetch_sub(3, Ordering::AcqRel);
let our_id = &keys_manager.get_node_id(Recipient::Node).unwrap();
- let network_graph = Arc::new(NetworkGraph::new(genesis_block(network).block_hash(), Arc::clone(&logger)));
+ let network_graph = Arc::new(NetworkGraph::new(network, Arc::clone(&logger)));
let gossip_sync = Arc::new(P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger)));
let scorer = FixedPenaltyScorer::with_penalty(0);
let params = RouteParameters {
payment_params,
final_value_msat,
- final_cltv_expiry_delta: 42,
};
let random_seed_bytes: [u8; 32] = keys_manager.get_secure_random_bytes();
let route = match find_route(&our_id, ¶ms, &network_graph, None, Arc::clone(&logger), &scorer, &random_seed_bytes) {
let params = RouteParameters {
payment_params,
final_value_msat,
- final_cltv_expiry_delta: 42,
};
let random_seed_bytes: [u8; 32] = keys_manager.get_secure_random_bytes();
let mut route = match find_route(&our_id, ¶ms, &network_graph, None, Arc::clone(&logger), &scorer, &random_seed_bytes) {
if let Err(e) = channelmanager.funding_transaction_generated(&funding_generation.0, &funding_generation.1, tx.clone()) {
// It's possible the channel has been closed in the mean time, but any other
// failure may be a bug.
- if let APIError::ChannelUnavailable { err } = e {
- if !err.starts_with("Can't find a peer matching the passed counterparty node_id ") {
- assert_eq!(err, "No such channel");
- }
- } else { panic!(); }
+ if let APIError::ChannelUnavailable { .. } = e { } else { panic!(); }
}
pending_funding_signatures.insert(funding_output, tx);
}
use crate::utils::test_logger;
-fn check_eq(btree: &BTreeMap<u8, u8>, indexed: &IndexedMap<u8, u8>) {
+use std::ops::{RangeBounds, Bound};
+
+struct ExclLowerInclUpper(u8, u8);
+impl RangeBounds<u8> for ExclLowerInclUpper {
+ fn start_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.0) }
+ fn end_bound(&self) -> Bound<&u8> { Bound::Included(&self.1) }
+}
+struct ExclLowerExclUpper(u8, u8);
+impl RangeBounds<u8> for ExclLowerExclUpper {
+ fn start_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.0) }
+ fn end_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.1) }
+}
+
+fn check_eq(btree: &BTreeMap<u8, u8>, mut indexed: IndexedMap<u8, u8>) {
assert_eq!(btree.len(), indexed.len());
assert_eq!(btree.is_empty(), indexed.is_empty());
let mut btree_clone = btree.clone();
assert!(btree_clone == *btree);
let mut indexed_clone = indexed.clone();
- assert!(indexed_clone == *indexed);
+ assert!(indexed_clone == indexed);
for k in 0..=255 {
assert_eq!(btree.contains_key(&k), indexed.contains_key(&k));
}
const STRIDE: u8 = 16;
- for k in 0..=255/STRIDE {
- let lower_bound = k * STRIDE;
- let upper_bound = lower_bound + (STRIDE - 1);
- let mut btree_iter = btree.range(lower_bound..=upper_bound);
- let mut indexed_iter = indexed.range(lower_bound..=upper_bound);
- loop {
- let b_v = btree_iter.next();
- let i_v = indexed_iter.next();
- assert_eq!(b_v, i_v);
- if b_v.is_none() { break; }
+ for range_type in 0..4 {
+ for k in 0..=255/STRIDE {
+ let lower_bound = k * STRIDE;
+ let upper_bound = lower_bound + (STRIDE - 1);
+ macro_rules! range { ($map: expr) => {
+ match range_type {
+ 0 => $map.range(lower_bound..upper_bound),
+ 1 => $map.range(lower_bound..=upper_bound),
+ 2 => $map.range(ExclLowerInclUpper(lower_bound, upper_bound)),
+ 3 => $map.range(ExclLowerExclUpper(lower_bound, upper_bound)),
+ _ => unreachable!(),
+ }
+ } }
+ let mut btree_iter = range!(btree);
+ let mut indexed_iter = range!(indexed);
+ loop {
+ let b_v = btree_iter.next();
+ let i_v = indexed_iter.next();
+ assert_eq!(b_v, i_v);
+ if b_v.is_none() { break; }
+ }
}
}
let prev_value_i = indexed.insert(tuple[0], tuple[1]);
assert_eq!(prev_value_b, prev_value_i);
}
- check_eq(&btree, &indexed);
+ check_eq(&btree, indexed.clone());
// Now, modify the maps in all the ways we have to do so, checking that the maps remain
// equivalent as we go.
*v = *k;
*btree.get_mut(k).unwrap() = *k;
}
- check_eq(&btree, &indexed);
+ check_eq(&btree, indexed.clone());
for k in 0..=255 {
match btree.entry(k) {
},
}
}
- check_eq(&btree, &indexed);
+ check_eq(&btree, indexed);
}
pub fn indexedmap_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use crate::utils::test_logger;
+use lightning::offers::invoice::Invoice;
+use lightning::util::ser::Writeable;
+use std::convert::TryFrom;
+
+#[inline]
+pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+ if let Ok(invoice) = Invoice::try_from(data.to_vec()) {
+ let mut bytes = Vec::with_capacity(data.len());
+ invoice.write(&mut bytes).unwrap();
+ assert_eq!(data, bytes);
+ }
+}
+
+pub fn invoice_deser_test<Out: test_logger::Output>(data: &[u8], out: Out) {
+ do_test(data, out);
+}
+
+#[no_mangle]
+pub extern "C" fn invoice_deser_run(data: *const u8, datalen: usize) {
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {});
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use bitcoin::secp256k1::{KeyPair, Parity, PublicKey, Secp256k1, SecretKey, self};
+use crate::utils::test_logger;
+use core::convert::{Infallible, TryFrom};
+use lightning::chain::keysinterface::EntropySource;
+use lightning::ln::PaymentHash;
+use lightning::ln::features::BlindedHopFeatures;
+use lightning::offers::invoice::{BlindedPayInfo, UnsignedInvoice};
+use lightning::offers::invoice_request::InvoiceRequest;
+use lightning::offers::parse::SemanticError;
+use lightning::onion_message::BlindedPath;
+use lightning::util::ser::Writeable;
+
+#[inline]
+pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+ if let Ok(invoice_request) = InvoiceRequest::try_from(data.to_vec()) {
+ let mut bytes = Vec::with_capacity(data.len());
+ invoice_request.write(&mut bytes).unwrap();
+ assert_eq!(data, bytes);
+
+ let secp_ctx = Secp256k1::new();
+ let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let mut buffer = Vec::new();
+
+ if let Ok(unsigned_invoice) = build_response(&invoice_request, &secp_ctx) {
+ let signing_pubkey = unsigned_invoice.signing_pubkey();
+ let (x_only_pubkey, _) = keys.x_only_public_key();
+ let odd_pubkey = x_only_pubkey.public_key(Parity::Odd);
+ let even_pubkey = x_only_pubkey.public_key(Parity::Even);
+ if signing_pubkey == odd_pubkey || signing_pubkey == even_pubkey {
+ unsigned_invoice
+ .sign::<_, Infallible>(
+ |digest| Ok(secp_ctx.sign_schnorr_no_aux_rand(digest, &keys))
+ )
+ .unwrap()
+ .write(&mut buffer)
+ .unwrap();
+ } else {
+ unsigned_invoice
+ .sign::<_, Infallible>(
+ |digest| Ok(secp_ctx.sign_schnorr_no_aux_rand(digest, &keys))
+ )
+ .unwrap_err();
+ }
+ }
+ }
+}
+
+struct Randomness;
+
+impl EntropySource for Randomness {
+ fn get_secure_random_bytes(&self) -> [u8; 32] { [42; 32] }
+}
+
+fn pubkey(byte: u8) -> PublicKey {
+ let secp_ctx = Secp256k1::new();
+ PublicKey::from_secret_key(&secp_ctx, &privkey(byte))
+}
+
+fn privkey(byte: u8) -> SecretKey {
+ SecretKey::from_slice(&[byte; 32]).unwrap()
+}
+
+fn build_response<'a, T: secp256k1::Signing + secp256k1::Verification>(
+ invoice_request: &'a InvoiceRequest, secp_ctx: &Secp256k1<T>
+) -> Result<UnsignedInvoice<'a>, SemanticError> {
+ let entropy_source = Randomness {};
+ let paths = vec![
+ BlindedPath::new(&[pubkey(43), pubkey(44), pubkey(42)], &entropy_source, secp_ctx).unwrap(),
+ BlindedPath::new(&[pubkey(45), pubkey(46), pubkey(42)], &entropy_source, secp_ctx).unwrap(),
+ ];
+
+ let payinfo = vec![
+ BlindedPayInfo {
+ fee_base_msat: 1,
+ fee_proportional_millionths: 1_000,
+ cltv_expiry_delta: 42,
+ htlc_minimum_msat: 100,
+ htlc_maximum_msat: 1_000_000_000_000,
+ features: BlindedHopFeatures::empty(),
+ },
+ BlindedPayInfo {
+ fee_base_msat: 1,
+ fee_proportional_millionths: 1_000,
+ cltv_expiry_delta: 42,
+ htlc_minimum_msat: 100,
+ htlc_maximum_msat: 1_000_000_000_000,
+ features: BlindedHopFeatures::empty(),
+ },
+ ];
+
+ let payment_paths = paths.into_iter().zip(payinfo.into_iter()).collect();
+ let payment_hash = PaymentHash([42; 32]);
+ invoice_request.respond_with(payment_paths, payment_hash)?.build()
+}
+
+pub fn invoice_request_deser_test<Out: test_logger::Output>(data: &[u8], out: Out) {
+ do_test(data, out);
+}
+
+#[no_mangle]
+pub extern "C" fn invoice_request_deser_run(data: *const u8, datalen: usize) {
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {});
+}
pub mod utils;
+pub mod bech32_parse;
pub mod chanmon_deser;
pub mod chanmon_consistency;
pub mod full_stack;
pub mod indexedmap;
+pub mod invoice_deser;
+pub mod invoice_request_deser;
+pub mod offer_deser;
pub mod onion_message;
pub mod peer_crypt;
pub mod process_network_graph;
+pub mod refund_deser;
pub mod router;
pub mod zbase32;
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey};
+use crate::utils::test_logger;
+use core::convert::{Infallible, TryFrom};
+use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::offers::offer::{Amount, Offer, Quantity};
+use lightning::offers::parse::SemanticError;
+use lightning::util::ser::Writeable;
+
+#[inline]
+pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+ if let Ok(offer) = Offer::try_from(data.to_vec()) {
+ let mut bytes = Vec::with_capacity(data.len());
+ offer.write(&mut bytes).unwrap();
+ assert_eq!(data, bytes);
+
+ let secp_ctx = Secp256k1::new();
+ let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let pubkey = PublicKey::from(keys);
+ let mut buffer = Vec::new();
+
+ if let Ok(invoice_request) = build_response(&offer, pubkey) {
+ invoice_request
+ .sign::<_, Infallible>(
+ |digest| Ok(secp_ctx.sign_schnorr_no_aux_rand(digest, &keys))
+ )
+ .unwrap()
+ .write(&mut buffer)
+ .unwrap();
+ }
+ }
+}
+
+fn build_response<'a>(
+ offer: &'a Offer, pubkey: PublicKey
+) -> Result<UnsignedInvoiceRequest<'a>, SemanticError> {
+ let mut builder = offer.request_invoice(vec![42; 64], pubkey)?;
+
+ builder = match offer.amount() {
+ None => builder.amount_msats(1000).unwrap(),
+ Some(Amount::Bitcoin { amount_msats }) => builder.amount_msats(amount_msats + 1)?,
+ Some(Amount::Currency { .. }) => return Err(SemanticError::UnsupportedCurrency),
+ };
+
+ builder = match offer.supported_quantity() {
+ Quantity::Bounded(n) => builder.quantity(n.get()).unwrap(),
+ Quantity::Unbounded => builder.quantity(10).unwrap(),
+ Quantity::One => builder,
+ };
+
+ builder.build()
+}
+
+pub fn offer_deser_test<Out: test_logger::Output>(data: &[u8], out: Out) {
+ do_test(data, out);
+}
+
+#[no_mangle]
+pub extern "C" fn offer_deser_run(data: *const u8, datalen: usize) {
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {});
+}
// Imports that need to be added manually
use lightning_rapid_gossip_sync::RapidGossipSync;
-use bitcoin::hashes::Hash as TraitImport;
use crate::utils::test_logger;
/// Actual fuzz test, method signature and name are fixed
fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
- let block_hash = bitcoin::BlockHash::all_zeros();
let logger = test_logger::TestLogger::new("".to_owned(), out);
- let network_graph = lightning::routing::gossip::NetworkGraph::new(block_hash, &logger);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let network_graph = lightning::routing::gossip::NetworkGraph::new(bitcoin::Network::Bitcoin, &logger);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let _ = rapid_sync.update_network_graph(data);
}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey, self};
+use crate::utils::test_logger;
+use core::convert::{Infallible, TryFrom};
+use lightning::chain::keysinterface::EntropySource;
+use lightning::ln::PaymentHash;
+use lightning::ln::features::BlindedHopFeatures;
+use lightning::offers::invoice::{BlindedPayInfo, UnsignedInvoice};
+use lightning::offers::parse::SemanticError;
+use lightning::offers::refund::Refund;
+use lightning::onion_message::BlindedPath;
+use lightning::util::ser::Writeable;
+
+#[inline]
+pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+ if let Ok(refund) = Refund::try_from(data.to_vec()) {
+ let mut bytes = Vec::with_capacity(data.len());
+ refund.write(&mut bytes).unwrap();
+ assert_eq!(data, bytes);
+
+ let secp_ctx = Secp256k1::new();
+ let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let pubkey = PublicKey::from(keys);
+ let mut buffer = Vec::new();
+
+ if let Ok(invoice) = build_response(&refund, pubkey, &secp_ctx) {
+ invoice
+ .sign::<_, Infallible>(
+ |digest| Ok(secp_ctx.sign_schnorr_no_aux_rand(digest, &keys))
+ )
+ .unwrap()
+ .write(&mut buffer)
+ .unwrap();
+ }
+ }
+}
+
+struct Randomness;
+
+impl EntropySource for Randomness {
+ fn get_secure_random_bytes(&self) -> [u8; 32] { [42; 32] }
+}
+
+fn pubkey(byte: u8) -> PublicKey {
+ let secp_ctx = Secp256k1::new();
+ PublicKey::from_secret_key(&secp_ctx, &privkey(byte))
+}
+
+fn privkey(byte: u8) -> SecretKey {
+ SecretKey::from_slice(&[byte; 32]).unwrap()
+}
+
+fn build_response<'a, T: secp256k1::Signing + secp256k1::Verification>(
+ refund: &'a Refund, signing_pubkey: PublicKey, secp_ctx: &Secp256k1<T>
+) -> Result<UnsignedInvoice<'a>, SemanticError> {
+ let entropy_source = Randomness {};
+ let paths = vec![
+ BlindedPath::new(&[pubkey(43), pubkey(44), pubkey(42)], &entropy_source, secp_ctx).unwrap(),
+ BlindedPath::new(&[pubkey(45), pubkey(46), pubkey(42)], &entropy_source, secp_ctx).unwrap(),
+ ];
+
+ let payinfo = vec![
+ BlindedPayInfo {
+ fee_base_msat: 1,
+ fee_proportional_millionths: 1_000,
+ cltv_expiry_delta: 42,
+ htlc_minimum_msat: 100,
+ htlc_maximum_msat: 1_000_000_000_000,
+ features: BlindedHopFeatures::empty(),
+ },
+ BlindedPayInfo {
+ fee_base_msat: 1,
+ fee_proportional_millionths: 1_000,
+ cltv_expiry_delta: 42,
+ htlc_minimum_msat: 100,
+ htlc_maximum_msat: 1_000_000_000_000,
+ features: BlindedHopFeatures::empty(),
+ },
+ ];
+
+ let payment_paths = paths.into_iter().zip(payinfo.into_iter()).collect();
+ let payment_hash = PaymentHash([42; 32]);
+ refund.respond_with(payment_paths, payment_hash, signing_pubkey)?.build()
+}
+
+pub fn refund_deser_test<Out: test_logger::Output>(data: &[u8], out: Out) {
+ do_test(data, out);
+}
+
+#[no_mangle]
+pub extern "C" fn refund_deser_run(data: *const u8, datalen: usize) {
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {});
+}
use bitcoin::blockdata::transaction::TxOut;
use bitcoin::hash_types::BlockHash;
-use lightning::chain;
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmanager::{self, ChannelDetails, ChannelCounterparty};
use lightning::ln::msgs;
use lightning::routing::gossip::{NetworkGraph, RoutingFees};
+use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoLookupError, UtxoResult};
use lightning::routing::router::{find_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters};
-use lightning::routing::scoring::FixedPenaltyScorer;
+use lightning::routing::scoring::ProbabilisticScorer;
use lightning::util::config::UserConfig;
use lightning::util::ser::Readable;
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::PublicKey;
use bitcoin::network::constants::Network;
-use bitcoin::blockdata::constants::genesis_block;
use crate::utils::test_logger;
}
}
-struct FuzzChainSource {
+struct FuzzChainSource<'a, 'b, Out: test_logger::Output> {
input: Arc<InputData>,
+ net_graph: &'a NetworkGraph<&'b test_logger::TestLogger<Out>>,
}
-impl chain::Access for FuzzChainSource {
- fn get_utxo(&self, _genesis_hash: &BlockHash, _short_channel_id: u64) -> Result<TxOut, chain::AccessError> {
- match self.input.get_slice(2) {
- Some(&[0, _]) => Err(chain::AccessError::UnknownChain),
- Some(&[1, _]) => Err(chain::AccessError::UnknownTx),
- Some(&[_, x]) => Ok(TxOut { value: 0, script_pubkey: Builder::new().push_int(x as i64).into_script().to_v0_p2wsh() }),
- None => Err(chain::AccessError::UnknownTx),
- _ => unreachable!(),
+// Fuzz-driven UTXO lookup: the next two fuzz-input bytes select both the
+// lookup outcome (sync/async, success/error) and the fabricated output value.
+impl<Out: test_logger::Output> UtxoLookup for FuzzChainSource<'_, '_, Out> {
+ fn get_utxo(&self, _genesis_hash: &BlockHash, _short_channel_id: u64) -> UtxoResult {
+ let input_slice = self.input.get_slice(2);
+ // Out of fuzz input — report the transaction as unknown.
+ if input_slice.is_none() { return UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)); }
+ let input_slice = input_slice.unwrap();
+ // Fabricated TxOut; the first byte's parity picks the value, the second
+ // byte parameterizes the script so different inputs yield different SPKs.
+ let txo_res = TxOut {
+ value: if input_slice[0] % 2 == 0 { 1_000_000 } else { 1_000 },
+ script_pubkey: Builder::new().push_int(input_slice[1] as i64).into_script().to_v0_p2wsh(),
+ };
+ match input_slice {
+ &[0, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownChain)),
+ &[1, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)),
+ // Async lookup resolved immediately with a successful result.
+ &[2, _] => {
+ let future = UtxoFuture::new();
+ future.resolve_without_forwarding(self.net_graph, Ok(txo_res));
+ UtxoResult::Async(future.clone())
+ },
+ // Async lookup resolved immediately with an error.
+ &[3, _] => {
+ let future = UtxoFuture::new();
+ future.resolve_without_forwarding(self.net_graph, Err(UtxoLookupError::UnknownTx));
+ UtxoResult::Async(future.clone())
+ },
+ &[4, _] => {
+ UtxoResult::Async(UtxoFuture::new()) // the future will never resolve
+ },
+ // Any other two-byte input: synchronous success.
+ &[..] => UtxoResult::Sync(Ok(txo_res)),
}
}
}
let logger = test_logger::TestLogger::new("".to_owned(), out);
let our_pubkey = get_pubkey!();
- let net_graph = NetworkGraph::new(genesis_block(Network::Bitcoin).header.block_hash(), &logger);
+ let net_graph = NetworkGraph::new(Network::Bitcoin, &logger);
+ let chain_source = FuzzChainSource {
+ input: Arc::clone(&input),
+ net_graph: &net_graph,
+ };
let mut node_pks = HashSet::new();
let mut scid = 42;
let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4);
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1));
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2));
- let _ = net_graph.update_channel_from_unsigned_announcement::<&FuzzChainSource>(&msg, &None);
+ let _ = net_graph.update_channel_from_unsigned_announcement::
+ <&FuzzChainSource<'_, '_, Out>>(&msg, &None);
},
2 => {
let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4);
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1));
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2));
- let _ = net_graph.update_channel_from_unsigned_announcement(&msg, &Some(&FuzzChainSource { input: Arc::clone(&input) }));
+ let _ = net_graph.update_channel_from_unsigned_announcement(&msg, &Some(&chain_source));
},
3 => {
let _ = net_graph.update_channel_unsigned(&decode_msg!(msgs::UnsignedChannelUpdate, 72));
inbound_htlc_minimum_msat: None,
inbound_htlc_maximum_msat: None,
config: None,
+ feerate_sat_per_1000_weight: None,
});
}
Some(&first_hops_vec[..])
}]));
}
}
- let scorer = FixedPenaltyScorer::with_penalty(0);
+ let scorer = ProbabilisticScorer::new(Default::default(), &net_graph, &logger);
let random_seed_bytes: [u8; 32] = [get_slice!(1)[0]; 32];
for target in node_pks.iter() {
let final_value_msat = slice_to_be64(get_slice!(8));
payment_params: PaymentParameters::from_node_id(*target, final_cltv_expiry_delta)
.with_route_hints(last_hops.clone()),
final_value_msat,
- final_cltv_expiry_delta,
};
let _ = find_route(&our_pubkey, &route_params, &net_graph,
first_hops.map(|c| c.iter().collect::<Vec<_>>()).as_ref().map(|a| a.as_slice()),
#include <stdint.h>
+void bech32_parse_run(const unsigned char* data, size_t data_len);
void chanmon_deser_run(const unsigned char* data, size_t data_len);
void chanmon_consistency_run(const unsigned char* data, size_t data_len);
void full_stack_run(const unsigned char* data, size_t data_len);
+void invoice_deser_run(const unsigned char* data, size_t data_len);
+void invoice_request_deser_run(const unsigned char* data, size_t data_len);
+void offer_deser_run(const unsigned char* data, size_t data_len);
void onion_message_run(const unsigned char* data, size_t data_len);
void peer_crypt_run(const unsigned char* data, size_t data_len);
void process_network_graph_run(const unsigned char* data, size_t data_len);
+void refund_deser_run(const unsigned char* data, size_t data_len);
void router_run(const unsigned char* data, size_t data_len);
void zbase32_run(const unsigned char* data, size_t data_len);
void indexedmap_run(const unsigned char* data, size_t data_len);
[package]
name = "lightning-background-processor"
-version = "0.0.113"
+version = "0.0.114"
authors = ["Valentine Wallace <vwallace@protonmail.com>"]
license = "MIT OR Apache-2.0"
repository = "http://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = { version = "0.29.0", default-features = false }
-lightning = { version = "0.0.113", path = "../lightning", default-features = false }
-lightning-rapid-gossip-sync = { version = "0.0.113", path = "../lightning-rapid-gossip-sync", default-features = false }
+lightning = { version = "0.0.114", path = "../lightning", default-features = false }
+lightning-rapid-gossip-sync = { version = "0.0.114", path = "../lightning-rapid-gossip-sync", default-features = false }
futures-util = { version = "0.3", default-features = false, features = ["async-await-macro"], optional = true }
[dev-dependencies]
-lightning = { version = "0.0.113", path = "../lightning", features = ["_test_utils"] }
-lightning-invoice = { version = "0.21.0", path = "../lightning-invoice" }
-lightning-persister = { version = "0.0.113", path = "../lightning-persister" }
+lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
+lightning-invoice = { version = "0.22.0", path = "../lightning-invoice" }
+lightning-persister = { version = "0.0.114", path = "../lightning-persister" }
use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
+use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::Router;
use lightning::routing::scoring::{Score, WriteableScore};
-use lightning::util::events::{Event, EventHandler, EventsProvider};
+use lightning::util::events::{Event, PathFailure};
+#[cfg(feature = "std")]
+use lightning::util::events::{EventHandler, EventsProvider};
use lightning::util::logger::Logger;
use lightning::util::persist::Persister;
use lightning_rapid_gossip_sync::RapidGossipSync;
-use lightning::io;
use core::ops::Deref;
use core::time::Duration;
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
pub enum GossipSync<
- P: Deref<Target = P2PGossipSync<G, A, L>>,
+ P: Deref<Target = P2PGossipSync<G, U, L>>,
R: Deref<Target = RapidGossipSync<G, L>>,
G: Deref<Target = NetworkGraph<L>>,
- A: Deref,
+ U: Deref,
L: Deref,
>
-where A::Target: chain::Access, L::Target: Logger {
+where U::Target: UtxoLookup, L::Target: Logger {
/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
P2P(P),
/// Rapid gossip sync from a trusted server.
}
impl<
- P: Deref<Target = P2PGossipSync<G, A, L>>,
+ P: Deref<Target = P2PGossipSync<G, U, L>>,
R: Deref<Target = RapidGossipSync<G, L>>,
G: Deref<Target = NetworkGraph<L>>,
- A: Deref,
+ U: Deref,
L: Deref,
-> GossipSync<P, R, G, A, L>
-where A::Target: chain::Access, L::Target: Logger {
+> GossipSync<P, R, G, U, L>
+where U::Target: UtxoLookup, L::Target: Logger {
fn network_graph(&self) -> Option<&G> {
match self {
GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
}
/// (C-not exported) as the bindings concretize everything and have constructors for us
-impl<P: Deref<Target = P2PGossipSync<G, A, L>>, G: Deref<Target = NetworkGraph<L>>, A: Deref, L: Deref>
- GossipSync<P, &RapidGossipSync<G, L>, G, A, L>
+impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
+ GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
where
- A::Target: chain::Access,
+ U::Target: UtxoLookup,
L::Target: Logger,
{
/// Initializes a new [`GossipSync::P2P`] variant.
/// (C-not exported) as the bindings concretize everything and have constructors for us
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
GossipSync<
- &P2PGossipSync<G, &'a (dyn chain::Access + Send + Sync), L>,
+ &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
R,
G,
- &'a (dyn chain::Access + Send + Sync),
+ &'a (dyn UtxoLookup + Send + Sync),
L,
>
where
/// (C-not exported) as the bindings concretize everything and have constructors for us
impl<'a, L: Deref>
GossipSync<
- &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn chain::Access + Send + Sync), L>,
+ &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
&RapidGossipSync<&'a NetworkGraph<L>, L>,
&'a NetworkGraph<L>,
- &'a (dyn chain::Access + Send + Sync),
+ &'a (dyn UtxoLookup + Send + Sync),
L,
>
where
fn handle_network_graph_update<L: Deref>(
network_graph: &NetworkGraph<L>, event: &Event
) where L::Target: Logger {
- if let Event::PaymentPathFailed { ref network_update, .. } = event {
- if let Some(network_update) = network_update {
- network_graph.handle_network_update(&network_update);
- }
+ if let Event::PaymentPathFailed {
+ failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
+ {
+ network_graph.handle_network_update(upd);
}
}
log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
}
- last_prune_call = $get_timer(NETWORK_PRUNE_TIMER);
have_pruned = true;
}
+ last_prune_call = $get_timer(NETWORK_PRUNE_TIMER);
}
if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
#[cfg(feature = "futures")]
pub async fn process_events_async<
'a,
- CA: 'static + Deref + Send + Sync,
+ UL: 'static + Deref + Send + Sync,
CF: 'static + Deref + Send + Sync,
CW: 'static + Deref + Send + Sync,
T: 'static + Deref + Send + Sync,
PS: 'static + Deref + Send,
M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
- PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
+ PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
UMH: 'static + Deref + Send + Sync,
PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
Sleeper: Fn(Duration) -> SleepFuture
>(
persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
- gossip_sync: GossipSync<PGS, RGS, G, CA, L>, peer_manager: PM, logger: L, scorer: Option<S>,
+ gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
sleeper: Sleeper,
-) -> Result<(), io::Error>
+) -> Result<(), lightning::io::Error>
where
- CA::Target: 'static + chain::Access,
+ UL::Target: 'static + UtxoLookup,
CF::Target: 'static + chain::Filter,
CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: 'static + BroadcasterInterface,
/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
pub fn start<
'a,
- CA: 'static + Deref + Send + Sync,
+ UL: 'static + Deref + Send + Sync,
CF: 'static + Deref + Send + Sync,
CW: 'static + Deref + Send + Sync,
T: 'static + Deref + Send + Sync,
PS: 'static + Deref + Send,
M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
- PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
+ PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
UMH: 'static + Deref + Send + Sync,
PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
SC: for <'b> WriteableScore<'b>,
>(
persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
- gossip_sync: GossipSync<PGS, RGS, G, CA, L>, peer_manager: PM, logger: L, scorer: Option<S>,
+ gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
) -> Self
where
- CA::Target: 'static + chain::Access,
+ UL::Target: 'static + UtxoLookup,
CF::Target: 'static + chain::Filter,
CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: 'static + BroadcasterInterface,
use lightning::routing::router::{DefaultRouter, RouteHop};
use lightning::routing::scoring::{ChannelUsage, Score};
use lightning::util::config::UserConfig;
- use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
+ use lightning::util::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
use lightning::util::ser::Writeable;
use lightning::util::test_utils;
use lightning::util::persist::KVStorePersister;
impl Persister {
fn new(data_dir: String) -> Self {
- let filesystem_persister = FilesystemPersister::new(data_dir.clone());
+ let filesystem_persister = FilesystemPersister::new(data_dir);
Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
}
}
fn expect(&mut self, expectation: TestResult) {
- self.event_expectations.get_or_insert_with(|| VecDeque::new()).push_back(expectation);
+ self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
}
}
let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
let network = Network::Testnet;
let genesis_block = genesis_block(network);
- let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash(), logger.clone()));
+ let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
let scorer = Arc::new(Mutex::new(TestScorer::new()));
let seed = [i as u8; 32];
let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
let now = Duration::from_secs(genesis_block.header.time as u64);
let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
- let best_block = BestBlock::from_genesis(network);
+ let best_block = BestBlock::from_network(network);
let params = ChainParameters { network, best_block };
let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
- let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
+ let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
for i in 0..num_nodes {
for j in (i+1)..num_nodes {
- nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }).unwrap();
- nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
}
}
// Set up a background event handler for SpendableOutputs events.
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
let event_handler = move |event: Event| match event {
- Event::SpendableOutputs { .. } => sender.send(event.clone()).unwrap(),
+ Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
Event::ChannelReady { .. } => {},
Event::ChannelClosed { .. } => {},
_ => panic!("Unexpected event: {:?}", event),
let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
let data_dir = nodes[0].persister.get_data_dir();
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
- let persister = Arc::new(Persister::new(data_dir.clone()).with_graph_persistence_notifier(sender));
+ let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
let network_graph = nodes[0].network_graph.clone();
let features = ChannelFeatures::empty();
network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
];
- nodes[0].rapid_gossip_sync.update_network_graph(&initialization_input[..]).unwrap();
+ nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
// this should have added two channels
assert_eq!(network_graph.read_only().channels().len(), 3);
- let _ = receiver
+ receiver
.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
.expect("Network graph not pruned within deadline");
let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
let data_dir = nodes[0].persister.get_data_dir();
- let persister = Arc::new(Persister::new(data_dir.clone()));
+ let persister = Arc::new(Persister::new(data_dir));
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
let scored_scid = 4242;
payment_id: None,
payment_hash: PaymentHash([42; 32]),
payment_failed_permanently: false,
- network_update: None,
- all_paths_failed: true,
+ failure: PathFailure::OnPath { network_update: None },
path: path.clone(),
short_channel_id: Some(scored_scid),
- retry: None,
});
let event = receiver
.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
payment_id: None,
payment_hash: PaymentHash([42; 32]),
payment_failed_permanently: true,
- network_update: None,
- all_paths_failed: true,
+ failure: PathFailure::OnPath { network_update: None },
path: path.clone(),
short_channel_id: None,
- retry: None,
});
let event = receiver
.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
nodes[0].node.push_pending_event(Event::ProbeFailed {
payment_id: PaymentId([42; 32]),
payment_hash: PaymentHash([42; 32]),
- path: path.clone(),
+ path,
short_channel_id: Some(scored_scid),
});
let event = receiver
[package]
name = "lightning-block-sync"
-version = "0.0.113"
+version = "0.0.114"
authors = ["Jeffrey Czyz", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "http://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = "0.29.0"
-lightning = { version = "0.0.113", path = "../lightning" }
+lightning = { version = "0.0.114", path = "../lightning" }
futures-util = { version = "0.3" }
tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
serde = { version = "1.0", features = ["derive"], optional = true }
chunked_transfer = { version = "1.4", optional = true }
[dev-dependencies]
-lightning = { version = "0.0.113", path = "../lightning", features = ["_test_utils"] }
-tokio = { version = "~1.14", features = [ "macros", "rt" ] }
+lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
+tokio = { version = "1.14", features = [ "macros", "rt" ] }
let chain_listener = &ChainListenerSet(chain_listeners_at_height);
let mut chain_notifier = ChainNotifier { header_cache, chain_listener };
chain_notifier.connect_blocks(common_ancestor, most_connected_blocks, &mut chain_poller)
- .await.or_else(|(e, _)| Err(e))?;
+ .await.map_err(|(e, _)| e)?;
}
Ok(best_header)
}
/// Converts the error into the underlying error.
+ ///
+ /// May contain an [`std::io::Error`] from the [`BlockSource`]. See implementations for further
+ /// details, if any.
pub fn into_inner(self) -> Box<dyn std::error::Error + Send + Sync> {
self.error
}
let height = header.height;
let block_data = chain_poller
.fetch_block(&header).await
- .or_else(|e| Err((e, Some(new_tip))))?;
+ .map_err(|e| (e, Some(new_tip)))?;
debug_assert_eq!(block_data.block_hash, header.block_hash);
match block_data.deref() {
BlockData::FullBlock(block) => {
- self.chain_listener.block_connected(&block, height);
+ self.chain_listener.block_connected(block, height);
},
BlockData::HeaderOnly(header) => {
- self.chain_listener.filtered_block_connected(&header, &[], height);
+ self.chain_listener.filtered_block_connected(header, &[], height);
},
}
fn validate(self, block_hash: BlockHash) -> BlockSourceResult<Self::T> {
let pow_valid_block_hash = self.header
.validate_pow(&self.header.target())
- .or_else(|e| Err(BlockSourceError::persistent(e)))?;
+ .map_err(BlockSourceError::persistent)?;
if pow_valid_block_hash != block_hash {
return Err(BlockSourceError::persistent("invalid block hash"));
let pow_valid_block_hash = header
.validate_pow(&header.target())
- .or_else(|e| Err(BlockSourceError::persistent(e)))?;
+ .map_err(BlockSourceError::persistent)?;
if pow_valid_block_hash != block_hash {
return Err(BlockSourceError::persistent("invalid block hash"));
use std::convert::TryFrom;
use std::convert::TryInto;
+use std::error::Error;
+use std::fmt;
use std::sync::atomic::{AtomicUsize, Ordering};
+/// An error returned by the RPC server.
+#[derive(Debug)]
+pub struct RpcError {
+ /// The error code.
+ pub code: i64,
+ /// The error message.
+ pub message: String,
+}
+
+impl fmt::Display for RpcError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "RPC error {}: {}", self.code, self.message)
+ }
+}
+
+// Implementing `Error` lets `call_method` box an `RpcError` into the inner
+// error of a `std::io::Error` for callers to downcast.
+impl Error for RpcError {}
+
/// A simple RPC client for calling methods using HTTP `POST`.
+///
+/// Implements [`BlockSource`] and may return an `Err` containing [`RpcError`]. See
+/// [`RpcClient::call_method`] for details.
pub struct RpcClient {
basic_auth: String,
endpoint: HttpEndpoint,
}
/// Calls a method with the response encoded in JSON format and interpreted as type `T`.
+ ///
+ /// When an `Err` is returned, [`std::io::Error::into_inner`] may contain an [`RpcError`] if
+ /// [`std::io::Error::kind`] is [`std::io::ErrorKind::Other`].
pub async fn call_method<T>(&self, method: &str, params: &[serde_json::Value]) -> std::io::Result<T>
where JsonResponse: TryFrom<Vec<u8>, Error = std::io::Error> + TryInto<T, Error = std::io::Error> {
let host = format!("{}:{}", self.endpoint.host(), self.endpoint.port());
let error = &response["error"];
if !error.is_null() {
// TODO: Examine error code for a more precise std::io::ErrorKind.
- let message = error["message"].as_str().unwrap_or("unknown error");
- return Err(std::io::Error::new(std::io::ErrorKind::Other, message));
+ let rpc_error = RpcError {
+ code: error["code"].as_i64().unwrap_or(-1),
+ message: error["message"].as_str().unwrap_or("unknown error").to_string()
+ };
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error));
}
let result = &mut response["result"];
match client.call_method::<u64>("getblock", &[invalid_block_hash]).await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::Other);
- assert_eq!(e.get_ref().unwrap().to_string(), "invalid parameter");
+ let rpc_error: Box<RpcError> = e.into_inner().unwrap().downcast().unwrap();
+ assert_eq!(rpc_error.code, -8);
+ assert_eq!(rpc_error.message, "invalid parameter");
},
Ok(_) => panic!("Expected error"),
}
BlockHeaderData {
chainwork: self.blocks[0].header.work() + Uint256::from_u64(height as u64).unwrap(),
height: height as u32,
- header: self.blocks[height].header.clone(),
+ header: self.blocks[height].header,
}
}
}
if self.filtered_blocks {
- return Ok(BlockData::HeaderOnly(block.header.clone()));
+ return Ok(BlockData::HeaderOnly(block.header));
} else {
return Ok(BlockData::FullBlock(block.clone()));
}
--- /dev/null
+[package]
+name = "lightning-custom-message"
+version = "0.0.114"
+authors = ["Jeffrey Czyz"]
+license = "MIT OR Apache-2.0"
+repository = "http://github.com/lightningdevkit/rust-lightning"
+description = """
+Utilities for supporting custom peer-to-peer messages in LDK.
+"""
+edition = "2021"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+bitcoin = "0.29.0"
+lightning = { version = "0.0.114", path = "../lightning" }
--- /dev/null
+//! Utilities for supporting custom peer-to-peer messages in LDK.
+//!
+//! [BOLT 1] specifies a custom message type range for use with experimental or application-specific
+//! messages. While a [`CustomMessageHandler`] can be defined to support more than one message type,
+//! defining such a handler requires a significant amount of boilerplate and can be error prone.
+//!
+//! This crate provides the [`composite_custom_message_handler`] macro for easily composing
+//! pre-defined custom message handlers into one handler. The resulting handler can be further
+//! composed with other custom message handlers using the same macro.
+//!
+//! The following example demonstrates defining a `FooBarHandler` to compose separate handlers for
+//! `Foo` and `Bar` messages, and further composing it with a handler for `Baz` messages.
+//!
+//!```
+//! # extern crate bitcoin;
+//! extern crate lightning;
+//! #[macro_use]
+//! extern crate lightning_custom_message;
+//!
+//! # use bitcoin::secp256k1::PublicKey;
+//! # use lightning::io;
+//! # use lightning::ln::msgs::{DecodeError, LightningError};
+//! use lightning::ln::peer_handler::CustomMessageHandler;
+//! use lightning::ln::wire::{CustomMessageReader, self};
+//! use lightning::util::ser::Writeable;
+//! # use lightning::util::ser::Writer;
+//!
+//! // Assume that `FooHandler` and `BarHandler` are defined in one crate and `BazHandler` is
+//! // defined in another crate, handling messages `Foo`, `Bar`, and `Baz`, respectively.
+//!
+//! #[derive(Debug)]
+//! pub struct Foo;
+//!
+//! macro_rules! foo_type_id {
+//! () => { 32768 }
+//! }
+//!
+//! impl wire::Type for Foo {
+//! fn type_id(&self) -> u16 { foo_type_id!() }
+//! }
+//! impl Writeable for Foo {
+//! // ...
+//! # fn write<W: Writer>(&self, _: &mut W) -> Result<(), io::Error> {
+//! # unimplemented!()
+//! # }
+//! }
+//!
+//! pub struct FooHandler;
+//!
+//! impl CustomMessageReader for FooHandler {
+//! // ...
+//! # type CustomMessage = Foo;
+//! # fn read<R: io::Read>(
+//! # &self, _message_type: u16, _buffer: &mut R
+//! # ) -> Result<Option<Self::CustomMessage>, DecodeError> {
+//! # unimplemented!()
+//! # }
+//! }
+//! impl CustomMessageHandler for FooHandler {
+//! // ...
+//! # fn handle_custom_message(
+//! # &self, _msg: Self::CustomMessage, _sender_node_id: &PublicKey
+//! # ) -> Result<(), LightningError> {
+//! # unimplemented!()
+//! # }
+//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
+//! # unimplemented!()
+//! # }
+//! }
+//!
+//! #[derive(Debug)]
+//! pub struct Bar;
+//!
+//! macro_rules! bar_type_id {
+//! () => { 32769 }
+//! }
+//!
+//! impl wire::Type for Bar {
+//! fn type_id(&self) -> u16 { bar_type_id!() }
+//! }
+//! impl Writeable for Bar {
+//! // ...
+//! # fn write<W: Writer>(&self, _: &mut W) -> Result<(), io::Error> {
+//! # unimplemented!()
+//! # }
+//! }
+//!
+//! pub struct BarHandler;
+//!
+//! impl CustomMessageReader for BarHandler {
+//! // ...
+//! # type CustomMessage = Bar;
+//! # fn read<R: io::Read>(
+//! # &self, _message_type: u16, _buffer: &mut R
+//! # ) -> Result<Option<Self::CustomMessage>, DecodeError> {
+//! # unimplemented!()
+//! # }
+//! }
+//! impl CustomMessageHandler for BarHandler {
+//! // ...
+//! # fn handle_custom_message(
+//! # &self, _msg: Self::CustomMessage, _sender_node_id: &PublicKey
+//! # ) -> Result<(), LightningError> {
+//! # unimplemented!()
+//! # }
+//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
+//! # unimplemented!()
+//! # }
+//! }
+//!
+//! #[derive(Debug)]
+//! pub struct Baz;
+//!
+//! macro_rules! baz_type_id {
+//! () => { 32770 }
+//! }
+//!
+//! impl wire::Type for Baz {
+//! fn type_id(&self) -> u16 { baz_type_id!() }
+//! }
+//! impl Writeable for Baz {
+//! // ...
+//! # fn write<W: Writer>(&self, _: &mut W) -> Result<(), io::Error> {
+//! # unimplemented!()
+//! # }
+//! }
+//!
+//! pub struct BazHandler;
+//!
+//! impl CustomMessageReader for BazHandler {
+//! // ...
+//! # type CustomMessage = Baz;
+//! # fn read<R: io::Read>(
+//! # &self, _message_type: u16, _buffer: &mut R
+//! # ) -> Result<Option<Self::CustomMessage>, DecodeError> {
+//! # unimplemented!()
+//! # }
+//! }
+//! impl CustomMessageHandler for BazHandler {
+//! // ...
+//! # fn handle_custom_message(
+//! # &self, _msg: Self::CustomMessage, _sender_node_id: &PublicKey
+//! # ) -> Result<(), LightningError> {
+//! # unimplemented!()
+//! # }
+//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
+//! # unimplemented!()
+//! # }
+//! }
+//!
+//! # fn main() {
+//! // The first crate may define a handler composing `FooHandler` and `BarHandler` and export the
+//! // corresponding message type ids as a macro to use in further composition.
+//!
+//! composite_custom_message_handler!(
+//! pub struct FooBarHandler {
+//! foo: FooHandler,
+//! bar: BarHandler,
+//! }
+//!
+//! pub enum FooBarMessage {
+//! Foo(foo_type_id!()),
+//! Bar(bar_type_id!()),
+//! }
+//! );
+//!
+//! #[macro_export]
+//! macro_rules! foo_bar_type_ids {
+//! () => { foo_type_id!() | bar_type_id!() }
+//! }
+//!
+//! // Another crate can then define a handler further composing `FooBarHandler` with `BazHandler`
+//! // and similarly export the composition of message type ids as a macro.
+//!
+//! composite_custom_message_handler!(
+//! pub struct FooBarBazHandler {
+//! foo_bar: FooBarHandler,
+//! baz: BazHandler,
+//! }
+//!
+//! pub enum FooBarBazMessage {
+//! FooBar(foo_bar_type_ids!()),
+//! Baz(baz_type_id!()),
+//! }
+//! );
+//!
+//! #[macro_export]
+//! macro_rules! foo_bar_baz_type_ids {
+//! () => { foo_bar_type_ids!() | baz_type_id!() }
+//! }
+//! # }
+//!```
+//!
+//! [BOLT 1]: https://github.com/lightning/bolts/blob/master/01-messaging.md
+//! [`CustomMessageHandler`]: crate::lightning::ln::peer_handler::CustomMessageHandler
+
+#![doc(test(no_crate_inject, attr(deny(warnings))))]
+
+pub extern crate bitcoin;
+pub extern crate lightning;
+
+/// Defines a composite type implementing [`CustomMessageHandler`] (and therefore also implementing
+/// [`CustomMessageReader`]), along with a corresponding enumerated custom message [`Type`], from
+/// one or more previously defined custom message handlers.
+///
+/// Useful for parameterizing [`PeerManager`] with custom message handling for one or more sets of
+/// custom messages. Message type ids may be given as a valid `match` pattern, including ranges,
+/// though using OR-ed literal patterns is preferred in order to catch unreachable code for
+/// conflicting handlers.
+///
+/// See [crate documentation] for example usage.
+///
+/// [`CustomMessageHandler`]: crate::lightning::ln::peer_handler::CustomMessageHandler
+/// [`CustomMessageReader`]: crate::lightning::ln::wire::CustomMessageReader
+/// [`Type`]: crate::lightning::ln::wire::Type
+/// [`PeerManager`]: crate::lightning::ln::peer_handler::PeerManager
+/// [crate documentation]: self
+#[macro_export]
+macro_rules! composite_custom_message_handler {
+ (
+ $handler_visibility:vis struct $handler:ident {
+ $($field_visibility:vis $field:ident: $type:ty),* $(,)*
+ }
+
+ $message_visibility:vis enum $message:ident {
+ $($variant:ident($pattern:pat)),* $(,)*
+ }
+ ) => {
+ #[allow(missing_docs)]
+ $handler_visibility struct $handler {
+ $(
+ $field_visibility $field: $type,
+ )*
+ }
+
+ #[allow(missing_docs)]
+ #[derive(Debug)]
+ $message_visibility enum $message {
+ $(
+ $variant(<$type as $crate::lightning::ln::wire::CustomMessageReader>::CustomMessage),
+ )*
+ }
+
+ impl $crate::lightning::ln::peer_handler::CustomMessageHandler for $handler {
+ fn handle_custom_message(
+ &self, msg: Self::CustomMessage, sender_node_id: &$crate::bitcoin::secp256k1::PublicKey
+ ) -> Result<(), $crate::lightning::ln::msgs::LightningError> {
+ match msg {
+ $(
+ $message::$variant(message) => {
+ $crate::lightning::ln::peer_handler::CustomMessageHandler::handle_custom_message(
+ &self.$field, message, sender_node_id
+ )
+ },
+ )*
+ }
+ }
+
+ fn get_and_clear_pending_msg(&self) -> Vec<($crate::bitcoin::secp256k1::PublicKey, Self::CustomMessage)> {
+ vec![].into_iter()
+ $(
+ .chain(
+ self.$field
+ .get_and_clear_pending_msg()
+ .into_iter()
+ .map(|(pubkey, message)| (pubkey, $message::$variant(message)))
+ )
+ )*
+ .collect()
+ }
+ }
+
+ impl $crate::lightning::ln::wire::CustomMessageReader for $handler {
+ type CustomMessage = $message;
+ fn read<R: $crate::lightning::io::Read>(
+ &self, message_type: u16, buffer: &mut R
+ ) -> Result<Option<Self::CustomMessage>, $crate::lightning::ln::msgs::DecodeError> {
+ match message_type {
+ $(
+ $pattern => match <$type>::read(&self.$field, message_type, buffer)? {
+ None => unreachable!(),
+ Some(message) => Ok(Some($message::$variant(message))),
+ },
+ )*
+ _ => Ok(None),
+ }
+ }
+ }
+
+ impl $crate::lightning::ln::wire::Type for $message {
+ fn type_id(&self) -> u16 {
+ match self {
+ $(
+ Self::$variant(message) => message.type_id(),
+ )*
+ }
+ }
+ }
+
+ impl $crate::lightning::util::ser::Writeable for $message {
+ fn write<W: $crate::lightning::util::ser::Writer>(&self, writer: &mut W) -> Result<(), $crate::lightning::io::Error> {
+ match self {
+ $(
+ Self::$variant(message) => message.write(writer),
+ )*
+ }
+ }
+ }
+ }
+}
[package]
name = "lightning-invoice"
description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.21.0"
+version = "0.22.0"
authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
documentation = "https://docs.rs/lightning-invoice/"
license = "MIT OR Apache-2.0"
[dependencies]
bech32 = { version = "0.9.0", default-features = false }
-lightning = { version = "0.0.113", path = "../lightning", default-features = false }
+lightning = { version = "0.0.114", path = "../lightning", default-features = false }
secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"] }
num-traits = { version = "0.2.8", default-features = false }
bitcoin_hashes = { version = "0.11", default-features = false }
serde = { version = "1.0.118", optional = true }
[dev-dependencies]
-lightning = { version = "0.0.113", path = "../lightning", default-features = false, features = ["_test_utils"] }
+lightning = { version = "0.0.114", path = "../lightning", default-features = false, features = ["_test_utils"] }
hex = "0.4"
serde_json = { version = "1"}
use core::str;
use core::str::FromStr;
-use bech32;
use bech32::{u5, FromBase32};
use bitcoin_hashes::Hash;
use num_traits::{CheckedAdd, CheckedMul};
-use secp256k1;
use secp256k1::ecdsa::{RecoveryId, RecoverableSignature};
use secp256k1::PublicKey;
};
Ok(RawHrp {
- currency: currency,
+ currency,
raw_amount: amount,
- si_prefix: si_prefix,
+ si_prefix,
})
}
}
let tagged = parse_tagged_parts(&data[7..])?;
Ok(RawDataPart {
- timestamp: timestamp,
+ timestamp,
tagged_fields: tagged,
})
}
fn from_base32(field_data: &[u5]) -> Result<ExpiryTime, ParseError> {
match parse_int_be::<u64, u5>(field_data, 32)
- .map(|t| ExpiryTime::from_seconds(t))
+ .map(ExpiryTime::from_seconds)
{
Some(t) => Ok(t),
None => Err(ParseError::IntegerOverflowError),
type Err = ParseError;
fn from_base32(field_data: &[u5]) -> Result<Fallback, ParseError> {
- if field_data.len() < 1 {
+ if field_data.is_empty() {
return Err(ParseError::UnexpectedEndOfTaggedFields);
}
}
Ok(Fallback::SegWitProgram {
- version: version,
+ version,
program: bytes
})
},
}).collect::<Vec<_>>();
let data = RawDataPart {
- timestamp: timestamp,
- tagged_fields: tagged_fields,
+ timestamp,
+ tagged_fields,
};
Ok(RawInvoice {
- hrp: hrp,
- data: data,
+ hrp,
+ data,
})
}
}
recovered_pub_key = Some(recovered);
}
- let pub_key = included_pub_key.or_else(|| recovered_pub_key.as_ref())
+ let pub_key = included_pub_key.or(recovered_pub_key.as_ref())
.expect("One is always present");
let hash = Message::from_slice(&self.hash[..])
}
#[cfg(feature = "std")]
-impl Into<SystemTime> for PositiveTimestamp {
- fn into(self) -> SystemTime {
- SystemTime::UNIX_EPOCH + self.0
+impl From<PositiveTimestamp> for SystemTime {
+ fn from(val: PositiveTimestamp) -> Self {
+ SystemTime::UNIX_EPOCH + val.0
}
}
/// ```
pub fn from_signed(signed_invoice: SignedRawInvoice) -> Result<Self, SemanticError> {
let invoice = Invoice {
- signed_invoice: signed_invoice,
+ signed_invoice,
};
invoice.check_field_counts()?;
invoice.check_feature_bits()?;
///
/// (C-not exported) because we don't yet export InvoiceDescription
pub fn description(&self) -> InvoiceDescription {
- if let Some(ref direct) = self.signed_invoice.description() {
+ if let Some(direct) = self.signed_invoice.description() {
return InvoiceDescription::Direct(direct);
- } else if let Some(ref hash) = self.signed_invoice.description_hash() {
+ } else if let Some(hash) = self.signed_invoice.description_hash() {
return InvoiceDescription::Hash(hash);
}
unreachable!("ensured by constructor");
}
}
-impl Into<String> for Description {
- fn into(self) -> String {
- self.into_inner()
+impl From<Description> for String {
+ fn from(val: Description) -> Self {
+ val.into_inner()
}
}
}
}
-impl Into<RouteHint> for PrivateRoute {
- fn into(self) -> RouteHint {
- self.into_inner()
+impl From<PrivateRoute> for RouteHint {
+ fn from(val: PrivateRoute) -> Self {
+ val.into_inner()
}
}
// Multiple payment secrets
let invoice = {
- let mut invoice = invoice_template.clone();
+ let mut invoice = invoice_template;
invoice.data.tagged_fields.push(PaymentSecret(payment_secret).into());
invoice.data.tagged_fields.push(PaymentSecret(payment_secret).into());
invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
assert_eq!(invoice.hrp.raw_amount, Some(15));
- let invoice = builder.clone()
+ let invoice = builder
.amount_milli_satoshis(150)
.build_raw()
.unwrap();
.build_raw();
assert_eq!(long_route_res, Err(CreationError::RouteTooLong));
- let sign_error_res = builder.clone()
+ let sign_error_res = builder
.description("Test".into())
.payment_secret(PaymentSecret([0; 32]))
.try_build_signed(|_| {
let route_1 = RouteHint(vec![
RouteHintHop {
- src_node_id: public_key.clone(),
+ src_node_id: public_key,
short_channel_id: de::parse_int_be(&[123; 8], 256).expect("short chan ID slice too big?"),
fees: RoutingFees {
base_msat: 2,
htlc_maximum_msat: None,
},
RouteHintHop {
- src_node_id: public_key.clone(),
+ src_node_id: public_key,
short_channel_id: de::parse_int_be(&[42; 8], 256).expect("short chan ID slice too big?"),
fees: RoutingFees {
base_msat: 3,
let route_2 = RouteHint(vec![
RouteHintHop {
- src_node_id: public_key.clone(),
+ src_node_id: public_key,
short_channel_id: 0,
fees: RoutingFees {
base_msat: 4,
htlc_maximum_msat: None,
},
RouteHintHop {
- src_node_id: public_key.clone(),
+ src_node_id: public_key,
short_channel_id: de::parse_int_be(&[1; 8], 256).expect("short chan ID slice too big?"),
fees: RoutingFees {
base_msat: 5,
let builder = InvoiceBuilder::new(Currency::BitcoinTestnet)
.amount_milli_satoshis(123)
.duration_since_epoch(Duration::from_secs(1234567))
- .payee_pub_key(public_key.clone())
+ .payee_pub_key(public_key)
.expiry_time(Duration::from_secs(54321))
.min_final_cltv_expiry_delta(144)
.fallback(Fallback::PubKeyHash([0;20]))
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::keysinterface::{NodeSigner, SignerProvider, EntropySource};
use lightning::ln::{PaymentHash, PaymentSecret};
-use lightning::ln::channelmanager::{ChannelManager, PaymentId, PaymentSendFailure, Retry};
+use lightning::ln::channelmanager::{ChannelManager, PaymentId, Retry, RetryableSendFailure};
use lightning::routing::router::{PaymentParameters, RouteParameters, Router};
use lightning::util::logger::Logger;
invoice: &Invoice, amount_msats: u64, payment_id: PaymentId, retry_strategy: Retry,
payer: P
) -> Result<(), PaymentError> where P::Target: Payer {
- let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner());
- let payment_secret = Some(invoice.payment_secret().clone());
+ let payment_hash = PaymentHash((*invoice.payment_hash()).into_inner());
+ let payment_secret = Some(*invoice.payment_secret());
let mut payment_params = PaymentParameters::from_node_id(invoice.recover_payee_pub_key(),
invoice.min_final_cltv_expiry_delta() as u32)
- .with_expiry_time(expiry_time_from_unix_epoch(&invoice).as_secs())
+ .with_expiry_time(expiry_time_from_unix_epoch(invoice).as_secs())
.with_route_hints(invoice.route_hints());
if let Some(features) = invoice.features() {
payment_params = payment_params.with_features(features.clone());
let route_params = RouteParameters {
payment_params,
final_value_msat: amount_msats,
- final_cltv_expiry_delta: invoice.min_final_cltv_expiry_delta() as u32,
};
payer.send_payment(payment_hash, &payment_secret, payment_id, route_params, retry_strategy)
/// An error resulting from the provided [`Invoice`] or payment hash.
Invoice(&'static str),
/// An error occurring when sending a payment.
- Sending(PaymentSendFailure),
+ Sending(RetryableSendFailure),
}
/// A trait defining behavior of an [`Invoice`] payer.
payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry
) -> Result<(), PaymentError> {
self.send_payment_with_retry(payment_hash, payment_secret, payment_id, route_params, retry_strategy)
- .map_err(|e| PaymentError::Sending(e))
+ .map_err(PaymentError::Sending)
}
}
let invoice = invoice(payment_preimage);
let amt_msat = 10_000;
- match pay_zero_value_invoice(&invoice, amt_msat, Retry::Attempts(0), &nodes[0].node) {
+ match pay_zero_value_invoice(&invoice, amt_msat, Retry::Attempts(0), nodes[0].node) {
Err(PaymentError::Invoice("amount unexpected")) => {},
_ => panic!()
}
self.writer.write_u5(
u5::try_from_u8((self.buffer & 0b11111000) >> 3 ).expect("<32")
)?;
- self.buffer = self.buffer << 5;
+ self.buffer <<= 5;
self.buffer_bits -= 5;
}
self.writer.write_u5(u5::try_from_u8(from_buffer | from_byte).expect("<32"))?;
self.buffer = byte << (5 - self.buffer_bits);
- self.buffer_bits = 3 + self.buffer_bits;
+ self.buffer_bits += 3;
Ok(())
}
self.writer.write_u5(
u5::try_from_u8((self.buffer & 0b11111000) >> 3).expect("<32")
)?;
- self.buffer = self.buffer << 5;
+ self.buffer <<= 5;
self.buffer_bits -= 5;
}
L::Target: Logger,
{
- if phantom_route_hints.len() == 0 {
+ if phantom_route_hints.is_empty() {
return Err(SignOrCreationError::CreationError(
CreationError::MissingRouteHints,
));
}
}
- if channel.is_usable {
- if !online_channel_exists {
- log_trace!(logger, "Channel with connected peer exists for invoice route hints");
- online_channel_exists = true;
- }
+ if channel.is_usable && !online_channel_exists {
+ log_trace!(logger, "Channel with connected peer exists for invoice route hints");
+ online_channel_exists = true;
}
match filtered_channels.entry(channel.counterparty.node_id) {
create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
let non_default_invoice_expiry_secs = 4200;
let invoice = create_invoice_from_channelmanager_and_duration_since_epoch(
- &nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
+ nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
Some(10_000), "test".to_string(), Duration::from_secs(1234567),
non_default_invoice_expiry_secs, None).unwrap();
assert_eq!(invoice.amount_pico_btc(), Some(100_000));
let route_params = RouteParameters {
payment_params,
final_value_msat: invoice.amount_milli_satoshis().unwrap(),
- final_cltv_expiry_delta: invoice.min_final_cltv_expiry_delta() as u32,
};
let first_hops = nodes[0].node.list_usable_channels();
let network_graph = &node_cfgs[0].network_graph;
let logger = test_utils::TestLogger::new();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
- &nodes[0].node.get_our_node_id(), &route_params, &network_graph,
+ &nodes[0].node.get_our_node_id(), &route_params, network_graph,
Some(&first_hops.iter().collect::<Vec<_>>()), &logger, &scorer, &random_seed_bytes
).unwrap();
let payment_event = {
let mut payment_hash = PaymentHash([0; 32]);
payment_hash.0.copy_from_slice(&invoice.payment_hash().as_ref()[0..32]);
- nodes[0].node.send_payment(&route, payment_hash, &Some(invoice.payment_secret().clone()), PaymentId(payment_hash.0)).unwrap();
+ nodes[0].node.send_payment(&route, payment_hash, &Some(*invoice.payment_secret()), PaymentId(payment_hash.0)).unwrap();
let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
added_monitors.clear();
let custom_min_final_cltv_expiry_delta = Some(50);
let invoice = crate::utils::create_invoice_from_channelmanager_and_duration_since_epoch(
- &nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
+ nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
Some(10_000), "".into(), Duration::from_secs(1234567), 3600,
if with_custom_delta { custom_min_final_cltv_expiry_delta } else { None },
).unwrap();
let custom_min_final_cltv_expiry_delta = Some(21);
let invoice = crate::utils::create_invoice_from_channelmanager_and_duration_since_epoch(
- &nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
+ nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
Some(10_000), "".into(), Duration::from_secs(1234567), 3600,
custom_min_final_cltv_expiry_delta,
).unwrap();
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let description_hash = crate::Sha256(Hash::hash("Testing description_hash".as_bytes()));
let invoice = crate::utils::create_invoice_from_channelmanager_with_description_hash_and_duration_since_epoch(
- &nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
+ nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
Some(10_000), description_hash, Duration::from_secs(1234567), 3600, None,
).unwrap();
assert_eq!(invoice.amount_pico_btc(), Some(100_000));
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let payment_hash = PaymentHash([0; 32]);
let invoice = crate::utils::create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_hash(
- &nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
+ nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
Some(10_000), "test".to_string(), Duration::from_secs(1234567), 3600,
payment_hash, None,
).unwrap();
// With only one sufficient-value peer connected we should only get its hint
scid_aliases.remove(&chan_b.0.short_channel_id_alias.unwrap());
- nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases.clone());
// If we don't have any sufficient-value peers connected we should get all hints with
// sufficient value, even though there is a connected insufficient-value peer.
scid_aliases.insert(chan_b.0.short_channel_id_alias.unwrap());
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases);
}
mut chan_ids_to_match: HashSet<u64>
) {
let invoice = create_invoice_from_channelmanager_and_duration_since_epoch(
- &invoice_node.node, invoice_node.keys_manager, invoice_node.logger,
+ invoice_node.node, invoice_node.keys_manager, invoice_node.logger,
Currency::BitcoinTestnet, invoice_amt, "test".to_string(), Duration::from_secs(1234567),
3600, None).unwrap();
let hints = invoice.private_routes();
#[cfg(feature = "std")]
fn do_test_multi_node_receive(user_generated_pmt_hash: bool) {
let mut chanmon_cfgs = create_chanmon_cfgs(3);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let invoice =
crate::utils::create_phantom_invoice::<&test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestLogger>(
Some(payment_amt), payment_hash, "test".to_string(), non_default_invoice_expiry_secs,
- route_hints, &nodes[1].keys_manager, &nodes[1].keys_manager, &nodes[1].logger,
+ route_hints, nodes[1].keys_manager, nodes[1].keys_manager, nodes[1].logger,
Currency::BitcoinTestnet, None, Duration::from_secs(1234567)
).unwrap();
let (payment_hash, payment_secret) = (PaymentHash(invoice.payment_hash().into_inner()), *invoice.payment_secret());
let params = RouteParameters {
payment_params,
final_value_msat: invoice.amount_milli_satoshis().unwrap(),
- final_cltv_expiry_delta: invoice.min_final_cltv_expiry_delta() as u32,
};
let first_hops = nodes[0].node.list_usable_channels();
let network_graph = &node_cfgs[0].network_graph;
let logger = test_utils::TestLogger::new();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
- &nodes[0].node.get_our_node_id(), ¶ms, &network_graph,
+ &nodes[0].node.get_our_node_id(), ¶ms, network_graph,
Some(&first_hops.iter().collect::<Vec<_>>()), &logger, &scorer, &random_seed_bytes
).unwrap();
let (payment_event, fwd_idx) = {
let mut payment_hash = PaymentHash([0; 32]);
payment_hash.0.copy_from_slice(&invoice.payment_hash().as_ref()[0..32]);
- nodes[0].node.send_payment(&route, payment_hash, &Some(invoice.payment_secret().clone()), PaymentId(payment_hash.0)).unwrap();
+ nodes[0].node.send_payment(&route, payment_hash, &Some(*invoice.payment_secret()), PaymentId(payment_hash.0)).unwrap();
let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
added_monitors.clear();
let payment_preimage_opt = if user_generated_pmt_hash { None } else { Some(payment_preimage) };
expect_payment_claimable!(&nodes[fwd_idx], payment_hash, payment_secret, payment_amt, payment_preimage_opt, route.paths[0].last().unwrap().pubkey);
- do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[fwd_idx])[..]), false, payment_preimage);
+ do_claim_payment_along_route(&nodes[0], &[&vec!(&nodes[fwd_idx])[..]], false, payment_preimage);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
#[cfg(feature = "std")]
fn test_multi_node_hints_has_htlc_min_max_values() {
let mut chanmon_cfgs = create_chanmon_cfgs(3);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let invoice = crate::utils::create_phantom_invoice::<&test_utils::TestKeysInterface,
&test_utils::TestKeysInterface, &test_utils::TestLogger>(Some(payment_amt), Some(payment_hash),
- "test".to_string(), 3600, route_hints, &nodes[1].keys_manager, &nodes[1].keys_manager,
- &nodes[1].logger, Currency::BitcoinTestnet, None, Duration::from_secs(1234567)).unwrap();
+ "test".to_string(), 3600, route_hints, nodes[1].keys_manager, nodes[1].keys_manager,
+ nodes[1].logger, Currency::BitcoinTestnet, None, Duration::from_secs(1234567)).unwrap();
let chan_0_1 = &nodes[1].node.list_usable_channels()[0];
assert_eq!(invoice.route_hints()[0].0[0].htlc_minimum_msat, chan_0_1.inbound_htlc_minimum_msat);
&test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestLogger,
>(
Some(payment_amt), None, non_default_invoice_expiry_secs, description_hash,
- route_hints, &nodes[1].keys_manager, &nodes[1].keys_manager, &nodes[1].logger,
+ route_hints, nodes[1].keys_manager, nodes[1].keys_manager, nodes[1].logger,
Currency::BitcoinTestnet, None, Duration::from_secs(1234567),
)
.unwrap();
let duration_since_epoch = Duration::from_secs(1234567);
let invoice = crate::utils::create_phantom_invoice::<&test_utils::TestKeysInterface,
&test_utils::TestKeysInterface, &test_utils::TestLogger>(Some(payment_amt), payment_hash,
- "".to_string(), non_default_invoice_expiry_secs, route_hints, &nodes[1].keys_manager, &nodes[1].keys_manager,
- &nodes[1].logger, Currency::BitcoinTestnet, min_final_cltv_expiry_delta, duration_since_epoch).unwrap();
+ "".to_string(), non_default_invoice_expiry_secs, route_hints, nodes[1].keys_manager, nodes[1].keys_manager,
+ nodes[1].logger, Currency::BitcoinTestnet, min_final_cltv_expiry_delta, duration_since_epoch).unwrap();
assert_eq!(invoice.amount_pico_btc(), Some(200_000));
assert_eq!(invoice.min_final_cltv_expiry_delta(), (min_final_cltv_expiry_delta.unwrap() + 3) as u64);
assert_eq!(invoice.expiry_time(), Duration::from_secs(non_default_invoice_expiry_secs.into()));
#[cfg(feature = "std")]
fn test_multi_node_hints_includes_single_channels_to_participating_nodes() {
let mut chanmon_cfgs = create_chanmon_cfgs(3);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
#[cfg(feature = "std")]
fn test_multi_node_hints_includes_one_channel_of_each_counterparty_nodes_per_participating_node() {
let mut chanmon_cfgs = create_chanmon_cfgs(4);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[3].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
#[cfg(feature = "std")]
fn test_multi_node_forwarding_info_not_assigned_channel_excluded_from_hints() {
let mut chanmon_cfgs = create_chanmon_cfgs(4);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[3].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
#[cfg(feature = "std")]
fn test_multi_node_with_only_public_channels_hints_includes_only_phantom_route() {
let mut chanmon_cfgs = create_chanmon_cfgs(3);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
#[cfg(feature = "std")]
fn test_multi_node_with_mixed_public_and_private_channel_hints_includes_only_phantom_route() {
let mut chanmon_cfgs = create_chanmon_cfgs(4);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
#[cfg(feature = "std")]
fn test_multi_node_hints_has_only_highest_inbound_capacity_channel() {
let mut chanmon_cfgs = create_chanmon_cfgs(3);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
#[cfg(feature = "std")]
fn test_multi_node_channels_inbound_capacity_lower_than_invoice_amt_filtering() {
let mut chanmon_cfgs = create_chanmon_cfgs(4);
- let seed_1 = [42 as u8; 32];
- let seed_2 = [43 as u8; 32];
- let cross_node_seed = [44 as u8; 32];
+ let seed_1 = [42u8; 32];
+ let seed_2 = [43u8; 32];
+ let cross_node_seed = [44u8; 32];
chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
let invoice = crate::utils::create_phantom_invoice::<&test_utils::TestKeysInterface,
&test_utils::TestKeysInterface, &test_utils::TestLogger>(invoice_amt, None, "test".to_string(),
- 3600, phantom_route_hints, &invoice_node.keys_manager, &invoice_node.keys_manager,
- &invoice_node.logger, Currency::BitcoinTestnet, None, Duration::from_secs(1234567)).unwrap();
+ 3600, phantom_route_hints, invoice_node.keys_manager, invoice_node.keys_manager,
+ invoice_node.logger, Currency::BitcoinTestnet, None, Duration::from_secs(1234567)).unwrap();
let invoice_hints = invoice.private_routes();
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let result = crate::utils::create_invoice_from_channelmanager_and_duration_since_epoch(
- &nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
+ nodes[1].node, nodes[1].keys_manager, nodes[1].logger, Currency::BitcoinTestnet,
Some(10_000), "Some description".into(), Duration::from_secs(1234567), 3600, Some(MIN_FINAL_CLTV_EXPIRY_DELTA - 4),
);
match result {
[package]
name = "lightning-net-tokio"
-version = "0.0.113"
+version = "0.0.114"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
[dependencies]
bitcoin = "0.29.0"
-lightning = { version = "0.0.113", path = "../lightning" }
+lightning = { version = "0.0.114", path = "../lightning" }
tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
[dev-dependencies]
-tokio = { version = "~1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
-lightning = { version = "0.0.113", path = "../lightning", features = ["_test_utils"] }
+tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
//! type NodeSigner = dyn lightning::chain::keysinterface::NodeSigner + Send + Sync;
-//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
+//! type UtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
-//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
+//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, UtxoLookup, Logger>>;
//!
//! // Connect to node with pubkey their_node_id at addr:
//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
let (event_waker, event_receiver) = mpsc::channel(1);
tokio::spawn(Self::poll_event_process(peer_manager.clone(), event_receiver));
- // 8KB is nice and big but also should never cause any issues with stack overflowing.
- let mut buf = [0; 8192];
+ // 4KiB is nice and big without handling too many messages all at once, giving other peers
+ // a chance to do some work.
+ let mut buf = [0; 4096];
let mut our_descriptor = SocketDescriptor::new(us.clone());
// An enum describing why we did/are disconnecting:
tokio::select! {
v = write_avail_receiver.recv() => {
assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
- if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
+ if peer_manager.write_buffer_space_avail(&mut our_descriptor).is_err() {
break Disconnect::CloseConnection;
}
},
#[cfg(test)]
let last_us = Arc::clone(&us);
- let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
+ let handle_opt = if peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr).is_ok() {
Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
} else {
// Note that we will skip socket_disconnected here, in accordance with the PeerManager
fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> { None }
- fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) }
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures { InitFeatures::empty() }
+ fn processing_queue_high(&self) -> bool { false }
}
impl ChannelMessageHandler for MsgHandler {
fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &OpenChannel) {}
fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
- fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+ fn peer_disconnected(&self, their_node_id: &PublicKey) {
if *their_node_id == self.expected_pubkey {
self.disconnected_flag.store(true, Ordering::SeqCst);
self.pubkey_disconnected.clone().try_send(()).unwrap();
}
}
- fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> {
+ fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> {
if *their_node_id == self.expected_pubkey {
self.pubkey_connected.clone().try_send(()).unwrap();
}
[package]
name = "lightning-persister"
-version = "0.0.113"
+version = "0.0.114"
authors = ["Valentine Wallace", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
[dependencies]
bitcoin = "0.29.0"
-lightning = { version = "0.0.113", path = "../lightning" }
+lightning = { version = "0.0.114", path = "../lightning" }
libc = "0.2"
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3", features = ["winbase"] }
[dev-dependencies]
-lightning = { version = "0.0.113", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
let chanmon_cfgs = create_chanmon_cfgs(1);
let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
- let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
+ let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister, node_cfgs[0].keys_manager);
node_cfgs[0].chain_monitor = chain_mon_0;
let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
let chanmon_cfgs = create_chanmon_cfgs(2);
let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, &node_cfgs[0].keys_manager);
- let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, &node_cfgs[1].keys_manager);
+ let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, node_cfgs[0].keys_manager);
+ let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, node_cfgs[1].keys_manager);
node_cfgs[0].chain_monitor = chain_mon_0;
node_cfgs[1].chain_monitor = chain_mon_1;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let test_writeable = TestWriteable{};
let filename = "test_readonly_dir_persister_filename".to_string();
let path = "test_readonly_dir_persister_dir";
- fs::create_dir_all(path.to_string()).unwrap();
- let mut perms = fs::metadata(path.to_string()).unwrap().permissions();
+ fs::create_dir_all(path).unwrap();
+ let mut perms = fs::metadata(path).unwrap().permissions();
perms.set_readonly(true);
- fs::set_permissions(path.to_string(), perms).unwrap();
+ fs::set_permissions(path, perms).unwrap();
let mut dest_file = PathBuf::from(path);
dest_file.push(filename);
match write_to_file(dest_file, &test_writeable) {
[package]
name = "lightning-rapid-gossip-sync"
-version = "0.0.113"
+version = "0.0.114"
authors = ["Arik Sosman <git@arik.io>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
_bench_unstable = []
[dependencies]
-lightning = { version = "0.0.113", path = "../lightning", default-features = false }
+lightning = { version = "0.0.114", path = "../lightning", default-features = false }
bitcoin = { version = "0.29.0", default-features = false }
[dev-dependencies]
-lightning = { version = "0.0.113", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
//! # }
//! # let logger = FakeLogger {};
//!
-//! let block_hash = genesis_block(Network::Bitcoin).header.block_hash();
-//! let network_graph = NetworkGraph::new(block_hash, &logger);
-//! let rapid_sync = RapidGossipSync::new(&network_graph);
+//! let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
+//! let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
//! let snapshot_contents: &[u8] = &[0; 0];
//! let new_last_sync_timestamp_result = rapid_sync.update_network_graph(snapshot_contents);
//! ```
pub struct RapidGossipSync<NG: Deref<Target=NetworkGraph<L>>, L: Deref>
where L::Target: Logger {
network_graph: NG,
+ logger: L,
is_initial_sync_complete: AtomicBool
}
impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L::Target: Logger {
/// Instantiate a new [`RapidGossipSync`] instance.
- pub fn new(network_graph: NG) -> Self {
+ pub fn new(network_graph: NG, logger: L) -> Self {
Self {
network_graph,
+ logger,
is_initial_sync_complete: AtomicBool::new(false)
}
}
/// Update network graph from binary data.
/// Returns the last sync timestamp to be used the next time rapid sync data is queried.
///
- /// `network_graph`: network graph to be updated
- ///
/// `update_data`: `&[u8]` binary stream that comprises the update data
pub fn update_network_graph(&self, update_data: &[u8]) -> Result<u32, GraphSyncError> {
let mut read_cursor = io::Cursor::new(update_data);
self.update_network_graph_from_byte_stream(&mut read_cursor)
}
+ /// Update network graph from binary data.
+ /// Returns the last sync timestamp to be used the next time rapid sync data is queried.
+ ///
+ /// `update_data`: `&[u8]` binary stream that comprises the update data
+ /// `current_time_unix`: `Option<u64>` optional current timestamp to verify data age
+ pub fn update_network_graph_no_std(&self, update_data: &[u8], current_time_unix: Option<u64>) -> Result<u32, GraphSyncError> {
+ let mut read_cursor = io::Cursor::new(update_data);
+ self.update_network_graph_from_byte_stream_no_std(&mut read_cursor, current_time_unix)
+ }
+
/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
/// [`RapidGossipSync::new`].
///
mod tests {
use std::fs;
- use bitcoin::blockdata::constants::genesis_block;
use bitcoin::Network;
use lightning::ln::msgs::DecodeError;
fs::create_dir_all(graph_sync_test_directory).unwrap();
let graph_sync_test_file = test.get_test_file_path();
- fs::write(&graph_sync_test_file, valid_response).unwrap();
+ fs::write(graph_sync_test_file, valid_response).unwrap();
test
}
+
fn get_test_directory(&self) -> String {
- let graph_sync_test_directory = self.directory.clone() + "/graph-sync-tests";
- graph_sync_test_directory
+ self.directory.clone() + "/graph-sync-tests"
}
+
fn get_test_file_path(&self) -> String {
- let graph_sync_test_directory = self.get_test_directory();
- let graph_sync_test_file = graph_sync_test_directory.to_owned() + "/test_data.lngossip";
- graph_sync_test_file
+ self.get_test_directory() + "/test_data.lngossip"
}
}
let sync_test = FileSyncTest::new(tmp_directory, &valid_response);
let graph_sync_test_file = sync_test.get_test_file_path();
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let sync_result = rapid_sync.sync_network_graph_with_file_path(&graph_sync_test_file);
if sync_result.is_err() {
#[test]
fn measure_native_read_from_file() {
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let start = std::time::Instant::now();
let sync_result = rapid_sync
.sync_network_graph_with_file_path("./res/full_graph.lngossip");
pub mod bench {
use test::Bencher;
- use bitcoin::blockdata::constants::genesis_block;
use bitcoin::Network;
use lightning::ln::msgs::DecodeError;
#[bench]
fn bench_reading_full_graph_from_file(b: &mut Bencher) {
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
b.iter(|| {
- let network_graph = NetworkGraph::new(block_hash, &logger);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let sync_result = rapid_sync.sync_network_graph_with_file_path("./res/full_graph.lngossip");
if let Err(crate::error::GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}", io_error);
};
use lightning::routing::gossip::NetworkGraph;
use lightning::util::logger::Logger;
+use lightning::{log_warn, log_trace, log_given_level};
use lightning::util::ser::{BigSize, Readable};
use lightning::io;
use crate::error::GraphSyncError;
use crate::RapidGossipSync;
+#[cfg(all(feature = "std", not(test)))]
+use std::time::{SystemTime, UNIX_EPOCH};
+
#[cfg(not(feature = "std"))]
use alloc::{vec::Vec, borrow::ToOwned};
/// avoid malicious updates being able to trigger excessive memory allocation.
const MAX_INITIAL_NODE_ID_VECTOR_CAPACITY: u32 = 50_000;
+/// We disallow gossip data that's more than two weeks old, per BOLT 7's
+/// suggestion.
+const STALE_RGS_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14;
+
impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L::Target: Logger {
pub(crate) fn update_network_graph_from_byte_stream<R: io::Read>(
+ &self,
+ read_cursor: &mut R,
+ ) -> Result<u32, GraphSyncError> {
+ #[allow(unused_mut, unused_assignments)]
+ let mut current_time_unix = None;
+ #[cfg(all(feature = "std", not(test)))]
+ {
+ // Note that many tests rely on being able to set arbitrarily old timestamps, thus we
+ // disable this check during tests!
+ current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs());
+ }
+ self.update_network_graph_from_byte_stream_no_std(read_cursor, current_time_unix)
+ }
+
+ pub(crate) fn update_network_graph_from_byte_stream_no_std<R: io::Read>(
&self,
mut read_cursor: &mut R,
+ current_time_unix: Option<u64>
) -> Result<u32, GraphSyncError> {
let mut prefix = [0u8; 4];
read_cursor.read_exact(&mut prefix)?;
let chain_hash: BlockHash = Readable::read(read_cursor)?;
let latest_seen_timestamp: u32 = Readable::read(read_cursor)?;
+
+ if let Some(time) = current_time_unix {
+ if (latest_seen_timestamp as u64) < time.saturating_sub(STALE_RGS_UPDATE_AGE_LIMIT_SECS) {
+ return Err(LightningError{err: "Rapid Gossip Sync data is more than two weeks old".to_owned(), action: ErrorAction::IgnoreError}.into());
+ }
+ }
+
// backdate the applied timestamp by a week
let backdated_timestamp = latest_seen_timestamp.saturating_sub(24 * 3600 * 7);
if let ErrorAction::IgnoreDuplicateGossip = lightning_error.action {
// everything is fine, just a duplicate channel announcement
} else {
+ log_warn!(self.logger, "Failed to process channel announcement: {:?}", lightning_error);
return Err(lightning_error.into());
}
}
if (channel_flags & 0b_1000_0000) != 0 {
// incremental update, field flags will indicate mutated values
let read_only_network_graph = network_graph.read_only();
- if let Some(channel) = read_only_network_graph
- .channels()
- .get(&short_channel_id) {
-
- let directional_info = channel
- .get_directional_info(channel_flags)
- .ok_or(LightningError {
- err: "Couldn't find previous directional data for update".to_owned(),
- action: ErrorAction::IgnoreError,
- })?;
-
+ if let Some(directional_info) =
+ read_only_network_graph.channels().get(&short_channel_id)
+ .and_then(|channel| channel.get_directional_info(channel_flags))
+ {
synthetic_update.cltv_expiry_delta = directional_info.cltv_expiry_delta;
synthetic_update.htlc_minimum_msat = directional_info.htlc_minimum_msat;
synthetic_update.htlc_maximum_msat = directional_info.htlc_maximum_msat;
synthetic_update.fee_base_msat = directional_info.fees.base_msat;
synthetic_update.fee_proportional_millionths = directional_info.fees.proportional_millionths;
-
} else {
+ log_trace!(self.logger,
+ "Skipping application of channel update for chan {} with flags {} as original data is missing.",
+ short_channel_id, channel_flags);
skip_update_for_unknown_channel = true;
}
};
match network_graph.update_channel_unsigned(&synthetic_update) {
Ok(_) => {},
Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
- Err(LightningError { action: ErrorAction::IgnoreAndLog(_), .. }) => {},
+ Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => {
+ log_given_level!(self.logger, level, "Failed to apply channel update: {:?}", err);
+ },
Err(LightningError { action: ErrorAction::IgnoreError, .. }) => {},
Err(e) => return Err(e.into()),
}
#[cfg(test)]
mod tests {
- use bitcoin::blockdata::constants::genesis_block;
use bitcoin::Network;
use lightning::ln::msgs::DecodeError;
use lightning::util::test_utils::TestLogger;
use crate::error::GraphSyncError;
+ use crate::processing::STALE_RGS_UPDATE_AGE_LIMIT_SECS;
use crate::RapidGossipSync;
+ const VALID_RGS_BINARY: [u8; 300] = [
+ 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+ 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+ 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+ 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+ 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+ 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+ 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+ 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+ 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+ 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+ 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+ 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0,
+ 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0,
+ 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1,
+ 0, 0, 1,
+ ];
+ const VALID_BINARY_TIMESTAMP: u64 = 1642291930;
+
#[test]
fn network_graph_fails_to_update_from_clipped_input() {
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
let example_input = vec![
76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1, 24, 0,
0, 3, 232, 0, 0, 0,
];
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let update_result = rapid_sync.update_network_graph(&example_input[..]);
assert!(update_result.is_err());
if let Err(GraphSyncError::DecodeError(DecodeError::ShortRead)) = update_result {
68, 226, 0, 6, 11, 0, 1, 128,
];
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let update_result = rapid_sync.update_network_graph(&incremental_update_input[..]);
assert!(update_result.is_ok());
}
2, 68, 226, 0, 6, 11, 0, 1, 128,
];
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
- let update_result = rapid_sync.update_network_graph(&announced_update_input[..]);
- assert!(update_result.is_err());
- if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
- assert_eq!(
- lightning_error.err,
- "Couldn't find previous directional data for update"
- );
- } else {
- panic!("Unexpected update result: {:?}", update_result)
- }
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
+ rapid_sync.update_network_graph(&announced_update_input[..]).unwrap();
}
#[test]
0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
];
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let initialization_result = rapid_sync.update_network_graph(&initialization_input[..]);
if initialization_result.is_err() {
panic!(
0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
68, 226, 0, 6, 11, 0, 1, 128,
];
- let update_result = rapid_sync.update_network_graph(&opposite_direction_incremental_update_input[..]);
- assert!(update_result.is_err());
- if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
- assert_eq!(
- lightning_error.err,
- "Couldn't find previous directional data for update"
- );
- } else {
- panic!("Unexpected update result: {:?}", update_result)
- }
+ rapid_sync.update_network_graph(&opposite_direction_incremental_update_input[..]).unwrap();
}
#[test]
25, 192,
];
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let initialization_result = rapid_sync.update_network_graph(&initialization_input[..]);
assert!(initialization_result.is_ok());
25, 192,
];
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let initialization_result = rapid_sync.update_network_graph(&initialization_input[..]);
assert!(initialization_result.is_ok());
#[test]
fn full_update_succeeds() {
- let valid_input = vec![
- 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
- 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
- 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
- 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
- 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
- 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
- 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
- 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
- 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
- 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
- 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
- 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0,
- 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0,
- 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1,
- 0, 0, 1,
- ];
-
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
assert_eq!(network_graph.read_only().channels().len(), 0);
- let rapid_sync = RapidGossipSync::new(&network_graph);
- let update_result = rapid_sync.update_network_graph(&valid_input[..]);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
+ let update_result = rapid_sync.update_network_graph(&VALID_RGS_BINARY);
if update_result.is_err() {
panic!("Unexpected update result: {:?}", update_result)
}
assert!(after.contains("783241506229452801"));
}
+ #[test]
+ fn full_update_succeeds_at_the_beginning_of_the_unix_era() {
+ let logger = TestLogger::new();
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
+
+ assert_eq!(network_graph.read_only().channels().len(), 0);
+
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
+ // this is mostly for checking uint underflow issues before the fuzzer does
+ let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(0));
+ assert!(update_result.is_ok());
+ assert_eq!(network_graph.read_only().channels().len(), 2);
+ }
+
+ #[test]
+ fn timestamp_edge_cases_are_handled_correctly() {
+		// VALID_BINARY_TIMESTAMP is the timestamp encoded in VALID_RGS_BINARY
+ let logger = TestLogger::new();
+
+ let latest_succeeding_time = VALID_BINARY_TIMESTAMP + STALE_RGS_UPDATE_AGE_LIMIT_SECS;
+ let earliest_failing_time = latest_succeeding_time + 1;
+
+ {
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
+ assert_eq!(network_graph.read_only().channels().len(), 0);
+
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
+ let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_succeeding_time));
+ assert!(update_result.is_ok());
+ assert_eq!(network_graph.read_only().channels().len(), 2);
+ }
+
+ {
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
+ assert_eq!(network_graph.read_only().channels().len(), 0);
+
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
+ let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(earliest_failing_time));
+ assert!(update_result.is_err());
+ if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
+ assert_eq!(
+ lightning_error.err,
+ "Rapid Gossip Sync data is more than two weeks old"
+ );
+ } else {
+ panic!("Unexpected update result: {:?}", update_result)
+ }
+ }
+ }
+
#[test]
pub fn update_fails_with_unknown_version() {
let unknown_version_input = vec![
0, 0, 1,
];
- let block_hash = genesis_block(Network::Bitcoin).block_hash();
let logger = TestLogger::new();
- let network_graph = NetworkGraph::new(block_hash, &logger);
- let rapid_sync = RapidGossipSync::new(&network_graph);
+ let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let update_result = rapid_sync.update_network_graph(&unknown_version_input[..]);
assert!(update_result.is_err());
--- /dev/null
+[package]
+name = "lightning-transaction-sync"
+version = "0.0.114"
+authors = ["Elias Rohrer"]
+license = "MIT OR Apache-2.0"
+repository = "http://github.com/lightningdevkit/rust-lightning"
+description = """
+Utilities for syncing LDK via the transaction-based `Confirm` interface.
+"""
+edition = "2018"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[features]
+default = []
+esplora-async = ["async-interface", "esplora-client/async", "futures"]
+esplora-async-https = ["esplora-async", "reqwest/rustls-tls"]
+esplora-blocking = ["esplora-client/blocking"]
+async-interface = []
+
+[dependencies]
+lightning = { version = "0.0.114", path = "../lightning", default-features = false }
+bitcoin = { version = "0.29.0", default-features = false }
+bdk-macros = "0.6"
+futures = { version = "0.3", optional = true }
+esplora-client = { version = "0.3.0", default-features = false, optional = true }
+reqwest = { version = "0.11", optional = true, default-features = false, features = ["json"] }
+
+[dev-dependencies]
+lightning = { version = "0.0.114", path = "../lightning", features = ["std"] }
+electrsd = { version = "0.22.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_23_0"] }
+electrum-client = "0.12.0"
+once_cell = "1.16.0"
+tokio = { version = "1.14.0", features = ["full"] }
--- /dev/null
+use lightning::chain::WatchedOutput;
+use bitcoin::{Txid, BlockHash, Transaction, BlockHeader, OutPoint};
+
+use std::collections::{HashSet, HashMap};
+
+
+// Represents the current state.
+pub(crate) struct SyncState {
+ // Transactions that were previously processed, but must not be forgotten
+ // yet since they still need to be monitored for confirmation on-chain.
+ pub watched_transactions: HashSet<Txid>,
+ // Outputs that were previously processed, but must not be forgotten yet as
+ // as we still need to monitor any spends on-chain.
+ pub watched_outputs: HashMap<OutPoint, WatchedOutput>,
+ // The tip hash observed during our last sync.
+ pub last_sync_hash: Option<BlockHash>,
+ // Indicates whether we need to resync, e.g., after encountering an error.
+ pub pending_sync: bool,
+}
+
+impl SyncState {
+ pub fn new() -> Self {
+ Self {
+ watched_transactions: HashSet::new(),
+ watched_outputs: HashMap::new(),
+ last_sync_hash: None,
+ pending_sync: false,
+ }
+ }
+}
+
+
+// A queue that is to be filled by `Filter` and drained during the next syncing round.
+pub(crate) struct FilterQueue {
+ // Transactions that were registered via the `Filter` interface and have to be processed.
+ pub transactions: HashSet<Txid>,
+ // Outputs that were registered via the `Filter` interface and have to be processed.
+ pub outputs: HashMap<OutPoint, WatchedOutput>,
+}
+
+impl FilterQueue {
+ pub fn new() -> Self {
+ Self {
+ transactions: HashSet::new(),
+ outputs: HashMap::new(),
+ }
+ }
+
+ // Processes the transaction and output queues and adds them to the given [`SyncState`].
+ //
+ // Returns `true` if new items had been registered.
+ pub fn process_queues(&mut self, sync_state: &mut SyncState) -> bool {
+ let mut pending_registrations = false;
+
+ if !self.transactions.is_empty() {
+ pending_registrations = true;
+
+ sync_state.watched_transactions.extend(self.transactions.drain());
+ }
+
+ if !self.outputs.is_empty() {
+ pending_registrations = true;
+
+ sync_state.watched_outputs.extend(self.outputs.drain());
+ }
+ pending_registrations
+ }
+}
+
+pub(crate) struct ConfirmedTx {
+ pub tx: Transaction,
+ pub block_header: BlockHeader,
+ pub block_height: u32,
+ pub pos: usize,
+}
--- /dev/null
+use std::fmt;
+
+#[derive(Debug)]
+/// An error that possibly needs to be handled by the user.
+pub enum TxSyncError {
+ /// A transaction sync failed and needs to be retried eventually.
+ Failed,
+}
+
+impl std::error::Error for TxSyncError {}
+
+impl fmt::Display for TxSyncError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Self::Failed => write!(f, "Failed to conduct transaction sync."),
+ }
+ }
+}
+
+#[derive(Debug)]
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+pub(crate) enum InternalError {
+ /// A transaction sync failed and needs to be retried eventually.
+ Failed,
+ /// An inconsistency was encountered during transaction sync.
+ Inconsistency,
+}
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+impl fmt::Display for InternalError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Self::Failed => write!(f, "Failed to conduct transaction sync."),
+ Self::Inconsistency => {
+ write!(f, "Encountered an inconsistency during transaction sync.")
+ }
+ }
+ }
+}
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+impl std::error::Error for InternalError {}
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+impl From<esplora_client::Error> for TxSyncError {
+ fn from(_e: esplora_client::Error) -> Self {
+ Self::Failed
+ }
+}
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+impl From<esplora_client::Error> for InternalError {
+ fn from(_e: esplora_client::Error) -> Self {
+ Self::Failed
+ }
+}
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+impl From<InternalError> for TxSyncError {
+ fn from(_e: InternalError) -> Self {
+ Self::Failed
+ }
+}
--- /dev/null
+use crate::error::{TxSyncError, InternalError};
+use crate::common::{SyncState, FilterQueue, ConfirmedTx};
+
+use lightning::util::logger::Logger;
+use lightning::{log_error, log_info, log_debug, log_trace};
+use lightning::chain::WatchedOutput;
+use lightning::chain::{Confirm, Filter};
+
+use bitcoin::{BlockHash, Script, Txid};
+
+use esplora_client::Builder;
+#[cfg(feature = "async-interface")]
+use esplora_client::r#async::AsyncClient;
+#[cfg(not(feature = "async-interface"))]
+use esplora_client::blocking::BlockingClient;
+
+use std::collections::HashSet;
+use core::ops::Deref;
+
+/// Synchronizes LDK with a given [`Esplora`] server.
+///
+/// Needs to be registered with a [`ChainMonitor`] via the [`Filter`] interface to be informed of
+/// transactions and outputs to monitor for on-chain confirmation, unconfirmation, and
+/// reconfirmation.
+///
+/// Note that registration via [`Filter`] needs to happen before any calls to
+/// [`Watch::watch_channel`] to ensure we get notified of the items to monitor.
+///
+/// This uses and exposes either a blocking or async client variant dependent on whether the
+/// `esplora-blocking` or the `esplora-async` feature is enabled.
+///
+/// [`Esplora`]: https://github.com/Blockstream/electrs
+/// [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor
+/// [`Watch::watch_channel`]: lightning::chain::Watch::watch_channel
+/// [`Filter`]: lightning::chain::Filter
+pub struct EsploraSyncClient<L: Deref>
+where
+ L::Target: Logger,
+{
+ sync_state: MutexType<SyncState>,
+ queue: std::sync::Mutex<FilterQueue>,
+ client: EsploraClientType,
+ logger: L,
+}
+
+impl<L: Deref> EsploraSyncClient<L>
+where
+ L::Target: Logger,
+{
+ /// Returns a new [`EsploraSyncClient`] object.
+ pub fn new(server_url: String, logger: L) -> Self {
+ let builder = Builder::new(&server_url);
+ #[cfg(not(feature = "async-interface"))]
+ let client = builder.build_blocking().unwrap();
+ #[cfg(feature = "async-interface")]
+ let client = builder.build_async().unwrap();
+
+ EsploraSyncClient::from_client(client, logger)
+ }
+
+ /// Returns a new [`EsploraSyncClient`] object using the given Esplora client.
+ pub fn from_client(client: EsploraClientType, logger: L) -> Self {
+ let sync_state = MutexType::new(SyncState::new());
+ let queue = std::sync::Mutex::new(FilterQueue::new());
+ Self {
+ sync_state,
+ queue,
+ client,
+ logger,
+ }
+ }
+
+ /// Synchronizes the given `confirmables` via their [`Confirm`] interface implementations. This
+ /// method should be called regularly to keep LDK up-to-date with current chain data.
+ ///
+ /// For example, instances of [`ChannelManager`] and [`ChainMonitor`] can be informed about the
+ /// newest on-chain activity related to the items previously registered via the [`Filter`]
+ /// interface.
+ ///
+ /// [`Confirm`]: lightning::chain::Confirm
+ /// [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor
+ /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+ /// [`Filter`]: lightning::chain::Filter
+ #[maybe_async]
+ pub fn sync(&self, confirmables: Vec<&(dyn Confirm + Sync + Send)>) -> Result<(), TxSyncError> {
+ // This lock makes sure we're syncing once at a time.
+ #[cfg(not(feature = "async-interface"))]
+ let mut sync_state = self.sync_state.lock().unwrap();
+ #[cfg(feature = "async-interface")]
+ let mut sync_state = self.sync_state.lock().await;
+
+ log_info!(self.logger, "Starting transaction sync.");
+
+ let mut tip_hash = maybe_await!(self.client.get_tip_hash())?;
+
+ loop {
+ let pending_registrations = self.queue.lock().unwrap().process_queues(&mut sync_state);
+ let tip_is_new = Some(tip_hash) != sync_state.last_sync_hash;
+
+ // We loop until any registered transactions have been processed at least once, or the
+ // tip hasn't been updated during the last iteration.
+ if !sync_state.pending_sync && !pending_registrations && !tip_is_new {
+ // Nothing to do.
+ break;
+ } else {
+ // Update the known tip to the newest one.
+ if tip_is_new {
+ // First check for any unconfirmed transactions and act on it immediately.
+ match maybe_await!(self.get_unconfirmed_transactions(&confirmables)) {
+ Ok(unconfirmed_txs) => {
+ // Double-check the tip hash. If it changed, a reorg happened since
+ // we started syncing and we need to restart last-minute.
+ let check_tip_hash = maybe_await!(self.client.get_tip_hash())?;
+ if check_tip_hash != tip_hash {
+ tip_hash = check_tip_hash;
+ continue;
+ }
+
+ self.sync_unconfirmed_transactions(&mut sync_state, &confirmables, unconfirmed_txs);
+ },
+ Err(err) => {
+ // (Semi-)permanent failure, retry later.
+ log_error!(self.logger, "Failed during transaction sync, aborting.");
+ sync_state.pending_sync = true;
+ return Err(TxSyncError::from(err));
+ }
+ }
+
+ match maybe_await!(self.sync_best_block_updated(&confirmables, &tip_hash)) {
+ Ok(()) => {}
+ Err(InternalError::Inconsistency) => {
+ // Immediately restart syncing when we encounter any inconsistencies.
+ log_debug!(self.logger, "Encountered inconsistency during transaction sync, restarting.");
+ sync_state.pending_sync = true;
+ continue;
+ }
+ Err(err) => {
+ // (Semi-)permanent failure, retry later.
+ sync_state.pending_sync = true;
+ return Err(TxSyncError::from(err));
+ }
+ }
+ }
+
+ match maybe_await!(self.get_confirmed_transactions(&sync_state)) {
+ Ok(confirmed_txs) => {
+ // Double-check the tip hash. If it changed, a reorg happened since
+ // we started syncing and we need to restart last-minute.
+ let check_tip_hash = maybe_await!(self.client.get_tip_hash())?;
+ if check_tip_hash != tip_hash {
+ tip_hash = check_tip_hash;
+ continue;
+ }
+
+ self.sync_confirmed_transactions(
+ &mut sync_state,
+ &confirmables,
+ confirmed_txs,
+ );
+ }
+ Err(InternalError::Inconsistency) => {
+ // Immediately restart syncing when we encounter any inconsistencies.
+ log_debug!(self.logger, "Encountered inconsistency during transaction sync, restarting.");
+ sync_state.pending_sync = true;
+ continue;
+ }
+ Err(err) => {
+ // (Semi-)permanent failure, retry later.
+ log_error!(self.logger, "Failed during transaction sync, aborting.");
+ sync_state.pending_sync = true;
+ return Err(TxSyncError::from(err));
+ }
+ }
+ sync_state.last_sync_hash = Some(tip_hash);
+ sync_state.pending_sync = false;
+ }
+ }
+ log_info!(self.logger, "Finished transaction sync.");
+ Ok(())
+ }
+
+ #[maybe_async]
+ fn sync_best_block_updated(
+ &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, tip_hash: &BlockHash,
+ ) -> Result<(), InternalError> {
+
+ // Inform the interface of the new block.
+ let tip_header = maybe_await!(self.client.get_header_by_hash(tip_hash))?;
+ let tip_status = maybe_await!(self.client.get_block_status(&tip_hash))?;
+ if tip_status.in_best_chain {
+ if let Some(tip_height) = tip_status.height {
+ for c in confirmables {
+ c.best_block_updated(&tip_header, tip_height);
+ }
+ }
+ } else {
+ return Err(InternalError::Inconsistency);
+ }
+ Ok(())
+ }
+
+ fn sync_confirmed_transactions(
+ &self, sync_state: &mut SyncState, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, confirmed_txs: Vec<ConfirmedTx>,
+ ) {
+ for ctx in confirmed_txs {
+ for c in confirmables {
+ c.transactions_confirmed(
+ &ctx.block_header,
+ &[(ctx.pos, &ctx.tx)],
+ ctx.block_height,
+ );
+ }
+
+ sync_state.watched_transactions.remove(&ctx.tx.txid());
+
+ for input in &ctx.tx.input {
+ sync_state.watched_outputs.remove(&input.previous_output);
+ }
+ }
+ }
+
+ #[maybe_async]
+ fn get_confirmed_transactions(
+ &self, sync_state: &SyncState,
+ ) -> Result<Vec<ConfirmedTx>, InternalError> {
+
+ // First, check the confirmation status of registered transactions as well as the
+ // status of dependent transactions of registered outputs.
+
+ let mut confirmed_txs = Vec::new();
+
+ for txid in &sync_state.watched_transactions {
+ if let Some(confirmed_tx) = maybe_await!(self.get_confirmed_tx(&txid, None, None))? {
+ confirmed_txs.push(confirmed_tx);
+ }
+ }
+
+ for (_, output) in &sync_state.watched_outputs {
+ if let Some(output_status) = maybe_await!(self.client
+ .get_output_status(&output.outpoint.txid, output.outpoint.index as u64))?
+ {
+ if let Some(spending_txid) = output_status.txid {
+ if let Some(spending_tx_status) = output_status.status {
+ if let Some(confirmed_tx) = maybe_await!(self
+ .get_confirmed_tx(
+ &spending_txid,
+ spending_tx_status.block_hash,
+ spending_tx_status.block_height,
+ ))?
+ {
+ confirmed_txs.push(confirmed_tx);
+ }
+ }
+ }
+ }
+ }
+
+ // Sort all confirmed transactions first by block height, then by in-block
+ // position, and finally feed them to the interface in order.
+ confirmed_txs.sort_unstable_by(|tx1, tx2| {
+ tx1.block_height.cmp(&tx2.block_height).then_with(|| tx1.pos.cmp(&tx2.pos))
+ });
+
+ Ok(confirmed_txs)
+ }
+
+ #[maybe_async]
+ fn get_confirmed_tx(
+ &self, txid: &Txid, expected_block_hash: Option<BlockHash>, known_block_height: Option<u32>,
+ ) -> Result<Option<ConfirmedTx>, InternalError> {
+ if let Some(merkle_block) = maybe_await!(self.client.get_merkle_block(&txid))? {
+ let block_header = merkle_block.header;
+ let block_hash = block_header.block_hash();
+ if let Some(expected_block_hash) = expected_block_hash {
+ if expected_block_hash != block_hash {
+ log_trace!(self.logger, "Inconsistency: Tx {} expected in block {}, but is confirmed in {}", txid, expected_block_hash, block_hash);
+ return Err(InternalError::Inconsistency);
+ }
+ }
+
+ let mut matches = Vec::new();
+ let mut indexes = Vec::new();
+ let _ = merkle_block.txn.extract_matches(&mut matches, &mut indexes);
+ if indexes.len() != 1 || matches.len() != 1 || matches[0] != *txid {
+ log_error!(self.logger, "Retrieved Merkle block for txid {} doesn't match expectations. This should not happen. Please verify server integrity.", txid);
+ return Err(InternalError::Failed);
+ }
+
+ let pos = *indexes.get(0).ok_or(InternalError::Failed)? as usize;
+ if let Some(tx) = maybe_await!(self.client.get_tx(&txid))? {
+ if let Some(block_height) = known_block_height {
+ // We can take a shortcut here if a previous call already gave us the height.
+ return Ok(Some(ConfirmedTx { tx, block_header, pos, block_height }));
+ }
+
+ let block_status = maybe_await!(self.client.get_block_status(&block_hash))?;
+ if let Some(block_height) = block_status.height {
+ return Ok(Some(ConfirmedTx { tx, block_header, pos, block_height }));
+ } else {
+ // If any previously-confirmed block suddenly is no longer confirmed, we found
+ // an inconsistency and should start over.
+ log_trace!(self.logger, "Inconsistency: Tx {} was unconfirmed during syncing.", txid);
+ return Err(InternalError::Inconsistency);
+ }
+ }
+ }
+ Ok(None)
+ }
+
+ #[maybe_async]
+ fn get_unconfirmed_transactions(
+ &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>,
+ ) -> Result<Vec<Txid>, InternalError> {
+ // Query the interface for relevant txids and check whether the relevant blocks are still
+ // in the best chain, mark them unconfirmed otherwise
+ let relevant_txids = confirmables
+ .iter()
+ .flat_map(|c| c.get_relevant_txids())
+ .collect::<HashSet<(Txid, Option<BlockHash>)>>();
+
+ let mut unconfirmed_txs = Vec::new();
+
+ for (txid, block_hash_opt) in relevant_txids {
+ if let Some(block_hash) = block_hash_opt {
+ let block_status = maybe_await!(self.client.get_block_status(&block_hash))?;
+ if block_status.in_best_chain {
+ // Skip if the block in question is still confirmed.
+ continue;
+ }
+
+ unconfirmed_txs.push(txid);
+ } else {
+ log_error!(self.logger, "Untracked confirmation of funding transaction. Please ensure none of your channels had been created with LDK prior to version 0.0.113!");
+ panic!("Untracked confirmation of funding transaction. Please ensure none of your channels had been created with LDK prior to version 0.0.113!");
+ }
+ }
+ Ok(unconfirmed_txs)
+ }
+
+ fn sync_unconfirmed_transactions(
+ &self, sync_state: &mut SyncState, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, unconfirmed_txs: Vec<Txid>,
+ ) {
+ for txid in unconfirmed_txs {
+ for c in confirmables {
+ c.transaction_unconfirmed(&txid);
+ }
+
+ sync_state.watched_transactions.insert(txid);
+ }
+ }
+
+ /// Returns a reference to the underlying esplora client.
+ pub fn client(&self) -> &EsploraClientType {
+ &self.client
+ }
+}
+
+#[cfg(feature = "async-interface")]
+type MutexType<I> = futures::lock::Mutex<I>;
+#[cfg(not(feature = "async-interface"))]
+type MutexType<I> = std::sync::Mutex<I>;
+
+// The underlying client type.
+#[cfg(feature = "async-interface")]
+type EsploraClientType = AsyncClient;
+#[cfg(not(feature = "async-interface"))]
+type EsploraClientType = BlockingClient;
+
+
+impl<L: Deref> Filter for EsploraSyncClient<L>
+where
+ L::Target: Logger,
+{
+ fn register_tx(&self, txid: &Txid, _script_pubkey: &Script) {
+ let mut locked_queue = self.queue.lock().unwrap();
+ locked_queue.transactions.insert(*txid);
+ }
+
+ fn register_output(&self, output: WatchedOutput) {
+ let mut locked_queue = self.queue.lock().unwrap();
+ locked_queue.outputs.insert(output.outpoint.into_bitcoin_outpoint(), output);
+ }
+}
--- /dev/null
+//! Provides utilities for syncing LDK via the transaction-based [`Confirm`] interface.
+//!
+//! The provided synchronization clients need to be registered with a [`ChainMonitor`] via the
+//! [`Filter`] interface. Then, the respective `fn sync` needs to be called with the [`Confirm`]
+//! implementations to be synchronized, i.e., usually instances of [`ChannelManager`] and
+//! [`ChainMonitor`].
+//!
+//! ## Features and Backend Support
+//!
+//! - `esplora-blocking` enables syncing against an Esplora backend based on a blocking client.
+//! - `esplora-async` enables syncing against an Esplora backend based on an async client.
+//! - `esplora-async-https` enables the async Esplora client with support for HTTPS.
+//!
+//! ## Version Compatibility
+//!
+//! Currently this crate is compatible with nodes that were created with LDK version 0.0.113 and above.
+//!
+//! ## Usage Example:
+//!
+//! ```ignore
+//! let tx_sync = Arc::new(EsploraSyncClient::new(
+//! esplora_server_url,
+//! Arc::clone(&some_logger),
+//! ));
+//!
+//! let chain_monitor = Arc::new(ChainMonitor::new(
+//! Some(Arc::clone(&tx_sync)),
+//! Arc::clone(&some_broadcaster),
+//! Arc::clone(&some_logger),
+//! Arc::clone(&some_fee_estimator),
+//! Arc::clone(&some_persister),
+//! ));
+//!
+//! let channel_manager = Arc::new(ChannelManager::new(
+//! Arc::clone(&some_fee_estimator),
+//! Arc::clone(&chain_monitor),
+//! Arc::clone(&some_broadcaster),
+//! Arc::clone(&some_router),
+//! Arc::clone(&some_logger),
+//! Arc::clone(&some_entropy_source),
+//! Arc::clone(&some_node_signer),
+//! Arc::clone(&some_signer_provider),
+//! user_config,
+//! chain_params,
+//! ));
+//!
+//! let confirmables = vec![
+//! &*channel_manager as &(dyn Confirm + Sync + Send),
+//! &*chain_monitor as &(dyn Confirm + Sync + Send),
+//! ];
+//!
+//! tx_sync.sync(confirmables).unwrap();
+//! ```
+//!
+//! [`Confirm`]: lightning::chain::Confirm
+//! [`Filter`]: lightning::chain::Filter
+//! [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor
+//! [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
+#![deny(missing_docs)]
+#![deny(unsafe_code)]
+
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+#[macro_use]
+extern crate bdk_macros;
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+mod esplora;
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+mod common;
+
+mod error;
+pub use error::TxSyncError;
+
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+pub use esplora::EsploraSyncClient;
--- /dev/null
+#![cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+use lightning_transaction_sync::EsploraSyncClient;
+use lightning::chain::{Confirm, Filter};
+use lightning::chain::transaction::TransactionData;
+use lightning::util::logger::{Logger, Record};
+
+use electrsd::{bitcoind, bitcoind::BitcoinD, ElectrsD};
+use bitcoin::{Amount, Txid, BlockHash, BlockHeader};
+use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::network::constants::Network;
+use electrsd::bitcoind::bitcoincore_rpc::bitcoincore_rpc_json::AddressType;
+use bitcoind::bitcoincore_rpc::RpcApi;
+use electrum_client::ElectrumApi;
+
+use once_cell::sync::OnceCell;
+
+use std::env;
+use std::sync::Mutex;
+use std::time::Duration;
+use std::collections::{HashMap, HashSet};
+
+static BITCOIND: OnceCell<BitcoinD> = OnceCell::new();
+static ELECTRSD: OnceCell<ElectrsD> = OnceCell::new();
+static PREMINE: OnceCell<()> = OnceCell::new();
+static MINER_LOCK: OnceCell<Mutex<()>> = OnceCell::new();
+
+fn get_bitcoind() -> &'static BitcoinD {
+ BITCOIND.get_or_init(|| {
+ let bitcoind_exe =
+ env::var("BITCOIND_EXE").ok().or_else(|| bitcoind::downloaded_exe_path().ok()).expect(
+ "you need to provide an env var BITCOIND_EXE or specify a bitcoind version feature",
+ );
+ let mut conf = bitcoind::Conf::default();
+ conf.network = "regtest";
+ let bitcoind = BitcoinD::with_conf(bitcoind_exe, &conf).unwrap();
+ std::thread::sleep(Duration::from_secs(1));
+ bitcoind
+ })
+}
+
+fn get_electrsd() -> &'static ElectrsD {
+ ELECTRSD.get_or_init(|| {
+ let bitcoind = get_bitcoind();
+ let electrs_exe =
+ env::var("ELECTRS_EXE").ok().or_else(electrsd::downloaded_exe_path).expect(
+ "you need to provide env var ELECTRS_EXE or specify an electrsd version feature",
+ );
+ let mut conf = electrsd::Conf::default();
+ conf.http_enabled = true;
+ conf.network = "regtest";
+ let electrsd = ElectrsD::with_conf(electrs_exe, &bitcoind, &conf).unwrap();
+ std::thread::sleep(Duration::from_secs(1));
+ electrsd
+ })
+}
+
+fn generate_blocks_and_wait(num: usize) {
+ let miner_lock = MINER_LOCK.get_or_init(|| Mutex::new(()));
+ let _miner = miner_lock.lock().unwrap();
+ let cur_height = get_bitcoind().client.get_block_count().expect("failed to get current block height");
+ let address = get_bitcoind().client.get_new_address(Some("test"), Some(AddressType::Legacy)).expect("failed to get new address");
+ // TODO: expect this Result once the WouldBlock issue is resolved upstream.
+ let _block_hashes_res = get_bitcoind().client.generate_to_address(num as u64, &address);
+ wait_for_block(cur_height as usize + num);
+}
+
+fn wait_for_block(min_height: usize) {
+ let mut header = match get_electrsd().client.block_headers_subscribe() {
+ Ok(header) => header,
+ Err(_) => {
+ // While subscribing should succeed the first time around, we ran into some cases where
+ // it didn't. Since we can't proceed without subscribing, we try again after a delay
+ // and panic if it still fails.
+ std::thread::sleep(Duration::from_secs(1));
+ get_electrsd().client.block_headers_subscribe().expect("failed to subscribe to block headers")
+ }
+ };
+
+ loop {
+ if header.height >= min_height {
+ break;
+ }
+ header = exponential_backoff_poll(|| {
+ get_electrsd().trigger().expect("failed to trigger electrsd");
+ get_electrsd().client.ping().expect("failed to ping electrsd");
+ get_electrsd().client.block_headers_pop().expect("failed to pop block header")
+ });
+ }
+}
+
+fn exponential_backoff_poll<T, F>(mut poll: F) -> T
+where
+ F: FnMut() -> Option<T>,
+{
+ let mut delay = Duration::from_millis(64);
+ let mut tries = 0;
+ loop {
+ match poll() {
+ Some(data) => break data,
+ None if delay.as_millis() < 512 => {
+ delay = delay.mul_f32(2.0);
+ tries += 1;
+ }
+ None if tries == 10 => panic!("Exceeded our maximum wait time."),
+ None => tries += 1,
+ }
+
+ std::thread::sleep(delay);
+ }
+}
+
+fn premine() {
+ PREMINE.get_or_init(|| {
+ generate_blocks_and_wait(101);
+ });
+}
+
+#[derive(Debug)]
+enum TestConfirmableEvent {
+ Confirmed(Txid, BlockHash, u32),
+ Unconfirmed(Txid),
+ BestBlockUpdated(BlockHash, u32),
+}
+
+struct TestConfirmable {
+ pub confirmed_txs: Mutex<HashMap<Txid, (BlockHash, u32)>>,
+ pub unconfirmed_txs: Mutex<HashSet<Txid>>,
+ pub best_block: Mutex<(BlockHash, u32)>,
+ pub events: Mutex<Vec<TestConfirmableEvent>>,
+}
+
+impl TestConfirmable {
+ pub fn new() -> Self {
+ let genesis_hash = genesis_block(Network::Regtest).block_hash();
+ Self {
+ confirmed_txs: Mutex::new(HashMap::new()),
+ unconfirmed_txs: Mutex::new(HashSet::new()),
+ best_block: Mutex::new((genesis_hash, 0)),
+ events: Mutex::new(Vec::new()),
+ }
+ }
+}
+
+impl Confirm for TestConfirmable {
+ fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData<'_>, height: u32) {
+ for (_, tx) in txdata {
+ let txid = tx.txid();
+ let block_hash = header.block_hash();
+ self.confirmed_txs.lock().unwrap().insert(txid, (block_hash, height));
+ self.unconfirmed_txs.lock().unwrap().remove(&txid);
+ self.events.lock().unwrap().push(TestConfirmableEvent::Confirmed(txid, block_hash, height));
+ }
+ }
+
+ fn transaction_unconfirmed(&self, txid: &Txid) {
+ self.unconfirmed_txs.lock().unwrap().insert(*txid);
+ self.confirmed_txs.lock().unwrap().remove(txid);
+ self.events.lock().unwrap().push(TestConfirmableEvent::Unconfirmed(*txid));
+ }
+
+ fn best_block_updated(&self, header: &BlockHeader, height: u32) {
+ let block_hash = header.block_hash();
+ *self.best_block.lock().unwrap() = (block_hash, height);
+ self.events.lock().unwrap().push(TestConfirmableEvent::BestBlockUpdated(block_hash, height));
+ }
+
+ fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
+ self.confirmed_txs.lock().unwrap().iter().map(|(&txid, (hash, _))| (txid, Some(*hash))).collect::<Vec<_>>()
+ }
+}
+
+pub struct TestLogger {}
+
+impl Logger for TestLogger {
+ fn log(&self, record: &Record) {
+ println!("{} -- {}",
+ record.level,
+ record.args);
+ }
+}
+
+#[test]
+#[cfg(feature = "esplora-blocking")]
+fn test_esplora_syncs() {
+ premine();
+ let mut logger = TestLogger {};
+ let esplora_url = format!("http://{}", get_electrsd().esplora_url.as_ref().unwrap());
+ let tx_sync = EsploraSyncClient::new(esplora_url, &mut logger);
+ let confirmable = TestConfirmable::new();
+
+ // Check we pick up on new best blocks
+ let expected_height = 0u32;
+ assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+
+ tx_sync.sync(vec![&confirmable]).unwrap();
+
+ let expected_height = get_bitcoind().client.get_block_count().unwrap() as u32;
+ assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 1);
+
+ // Check registered confirmed transactions are marked confirmed
+ let new_address = get_bitcoind().client.get_new_address(Some("test"), Some(AddressType::Legacy)).unwrap();
+ let txid = get_bitcoind().client.send_to_address(&new_address, Amount::from_sat(5000), None, None, None, None, None, None).unwrap();
+ tx_sync.register_tx(&txid, &new_address.script_pubkey());
+
+ tx_sync.sync(vec![&confirmable]).unwrap();
+
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 0);
+ assert!(confirmable.confirmed_txs.lock().unwrap().is_empty());
+ assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
+
+ generate_blocks_and_wait(1);
+ tx_sync.sync(vec![&confirmable]).unwrap();
+
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 2);
+ assert!(confirmable.confirmed_txs.lock().unwrap().contains_key(&txid));
+ assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
+
+ // Check previously confirmed transactions are marked unconfirmed when they are reorged.
+ let best_block_hash = get_bitcoind().client.get_best_block_hash().unwrap();
+ get_bitcoind().client.invalidate_block(&best_block_hash).unwrap();
+
+ // We're getting back to the previous height with a new tip, but best block shouldn't change.
+ generate_blocks_and_wait(1);
+ assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ tx_sync.sync(vec![&confirmable]).unwrap();
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 0);
+
+ // Now we're surpassing previous height, getting new tip.
+ generate_blocks_and_wait(1);
+ assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ tx_sync.sync(vec![&confirmable]).unwrap();
+
+ // Transaction still confirmed but under new tip.
+ assert!(confirmable.confirmed_txs.lock().unwrap().contains_key(&txid));
+ assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
+
+ // Check we got unconfirmed, then reconfirmed in the meantime.
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 3);
+
+ match events[0] {
+ TestConfirmableEvent::Unconfirmed(t) => {
+ assert_eq!(t, txid);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ match events[1] {
+ TestConfirmableEvent::BestBlockUpdated(..) => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ match events[2] {
+ TestConfirmableEvent::Confirmed(t, _, _) => {
+ assert_eq!(t, txid);
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[tokio::test]
+#[cfg(feature = "esplora-async")]
+async fn test_esplora_syncs() {
+ premine();
+ let mut logger = TestLogger {};
+ let esplora_url = format!("http://{}", get_electrsd().esplora_url.as_ref().unwrap());
+ let tx_sync = EsploraSyncClient::new(esplora_url, &mut logger);
+ let confirmable = TestConfirmable::new();
+
+ // Check we pick up on new best blocks
+ let expected_height = 0u32;
+ assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+
+ tx_sync.sync(vec![&confirmable]).await.unwrap();
+
+ let expected_height = get_bitcoind().client.get_block_count().unwrap() as u32;
+ assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 1);
+
+ // Check registered confirmed transactions are marked confirmed
+ let new_address = get_bitcoind().client.get_new_address(Some("test"), Some(AddressType::Legacy)).unwrap();
+ let txid = get_bitcoind().client.send_to_address(&new_address, Amount::from_sat(5000), None, None, None, None, None, None).unwrap();
+ tx_sync.register_tx(&txid, &new_address.script_pubkey());
+
+ tx_sync.sync(vec![&confirmable]).await.unwrap();
+
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 0);
+ assert!(confirmable.confirmed_txs.lock().unwrap().is_empty());
+ assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
+
+ generate_blocks_and_wait(1);
+ tx_sync.sync(vec![&confirmable]).await.unwrap();
+
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 2);
+ assert!(confirmable.confirmed_txs.lock().unwrap().contains_key(&txid));
+ assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
+
+ // Check previously confirmed transactions are marked unconfirmed when they are reorged.
+ let best_block_hash = get_bitcoind().client.get_best_block_hash().unwrap();
+ get_bitcoind().client.invalidate_block(&best_block_hash).unwrap();
+
+ // We're getting back to the previous height with a new tip, but best block shouldn't change.
+ generate_blocks_and_wait(1);
+ assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ tx_sync.sync(vec![&confirmable]).await.unwrap();
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 0);
+
+ // Now we're surpassing previous height, getting new tip.
+ generate_blocks_and_wait(1);
+ assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ tx_sync.sync(vec![&confirmable]).await.unwrap();
+
+ // Transaction still confirmed but under new tip.
+ assert!(confirmable.confirmed_txs.lock().unwrap().contains_key(&txid));
+ assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
+
+ // Check we got unconfirmed, then reconfirmed in the meantime.
+ let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
+ assert_eq!(events.len(), 3);
+
+ match events[0] {
+ TestConfirmableEvent::Unconfirmed(t) => {
+ assert_eq!(t, txid);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ match events[1] {
+ TestConfirmableEvent::BestBlockUpdated(..) => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ match events[2] {
+ TestConfirmableEvent::Confirmed(t, _, _) => {
+ assert_eq!(t, txid);
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[tokio::test]
+#[cfg(any(feature = "esplora-async-https", feature = "esplora-blocking"))]
+async fn test_esplora_connects_to_public_server() {
+ let mut logger = TestLogger {};
+ let esplora_url = "https://blockstream.info/api".to_string();
+ let tx_sync = EsploraSyncClient::new(esplora_url, &mut logger);
+ let confirmable = TestConfirmable::new();
+
+ // Check we connect and pick up on new best blocks
+ assert_eq!(confirmable.best_block.lock().unwrap().1, 0);
+ #[cfg(feature = "esplora-async-https")]
+ tx_sync.sync(vec![&confirmable]).await.unwrap();
+ #[cfg(feature = "esplora-blocking")]
+ tx_sync.sync(vec![&confirmable]).unwrap();
+ assert_ne!(confirmable.best_block.lock().unwrap().1, 0);
+}
[package]
name = "lightning"
-version = "0.0.113"
+version = "0.0.114"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
use crate::util::errors::APIError;
- use crate::util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
+ use crate::util::events::{Event, ClosureReason, MessageSendEvent, MessageSendEventsProvider};
#[test]
fn test_async_ooo_offchain_updates() {
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
nodes[1].node.claim_funds(payment_preimage_2);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
assert_eq!(persistences.len(), 1);
.find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+ let claim_events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(claim_events.len(), 2);
+ match claim_events[0] {
+ Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+ assert_eq!(payment_hash_1, *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match claim_events[1] {
+ Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+ assert_eq!(payment_hash_2, *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
// Now manually walk the commitment signed dance - because we claimed two payments
// back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
use crate::ln::msgs::DecodeError;
use crate::ln::chan_utils;
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction};
-use crate::ln::channelmanager::HTLCSource;
+use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
use crate::chain;
use crate::chain::{BestBlock, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use crate::chain::Filter;
use crate::util::logger::Logger;
-use crate::util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, U48, OptionDeserWrapper};
+use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
use crate::util::events::Event;
#[cfg(anchors)]
use crate::io::{self, Error};
use core::convert::TryInto;
use core::ops::Deref;
-use crate::sync::Mutex;
+use crate::sync::{Mutex, LockTestExt};
/// An update generated by the underlying channel itself which contains some new information the
/// [`ChannelMonitor`] should be made aware of.
}
}
- let mut counterparty_delayed_payment_base_key = OptionDeserWrapper(None);
- let mut counterparty_htlc_base_key = OptionDeserWrapper(None);
+ let mut counterparty_delayed_payment_base_key = RequiredWrapper(None);
+ let mut counterparty_htlc_base_key = RequiredWrapper(None);
let mut on_counterparty_tx_csv: u16 = 0;
read_tlv_fields!(r, {
(0, counterparty_delayed_payment_base_key, required),
let mut transaction = None;
let mut block_hash = None;
let mut height = 0;
- let mut event = None;
+ let mut event = UpgradableRequired(None);
read_tlv_fields!(reader, {
(0, txid, required),
(1, transaction, option),
(2, height, required),
(3, block_hash, option),
- (4, event, ignorable),
+ (4, event, upgradable_required),
});
- if let Some(ev) = event {
- Ok(Some(Self { txid, transaction, height, block_hash, event: ev }))
- } else {
- Ok(None)
- }
+ Ok(Some(Self { txid, transaction, height, block_hash, event: _init_tlv_based_struct_field!(event, upgradable_required) }))
}
}
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
+ claimed_htlcs: Vec<(SentHTLCId, PaymentPreimage)>,
},
LatestCounterpartyCommitmentTXInfo {
commitment_txid: Txid,
impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
(0, LatestHolderCommitmentTXInfo) => {
(0, commitment_tx, required),
+ (1, claimed_htlcs, vec_type),
(2, htlc_outputs, vec_type),
},
(1, LatestCounterpartyCommitmentTXInfo) => {
/// Serialized to disk but should generally not be sent to Watchtowers.
counterparty_hash_commitment_number: HashMap<PaymentHash, u64>,
+ counterparty_fulfilled_htlcs: HashMap<SentHTLCId, PaymentPreimage>,
+
// We store two holder commitment transactions to avoid any race conditions where we may update
// some monitors (potentially on watchtowers) but then fail to update others, resulting in the
// various monitors for one channel being out of sync, and us broadcasting a holder
impl<Signer: WriteableEcdsaChannelSigner> PartialEq for ChannelMonitor<Signer> where Signer: PartialEq {
fn eq(&self, other: &Self) -> bool {
- let inner = self.inner.lock().unwrap();
- let other = other.inner.lock().unwrap();
- inner.eq(&other)
+ // We need some kind of total lockorder. Absent a better idea, we sort by position in
+ // memory and take locks in that order (assuming that we can't move within memory while a
+ // lock is held).
+ let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
+ let a = if ord { self.inner.unsafe_well_ordered_double_lock_self() } else { other.inner.unsafe_well_ordered_double_lock_self() };
+ let b = if ord { other.inner.unsafe_well_ordered_double_lock_self() } else { self.inner.unsafe_well_ordered_double_lock_self() };
+ a.eq(&b)
}
}
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
(13, self.spendable_txids_confirmed, vec_type),
+ (15, self.counterparty_fulfilled_htlcs, required),
});
Ok(())
counterparty_claimable_outpoints: HashMap::new(),
counterparty_commitment_txn_on_chain: HashMap::new(),
counterparty_hash_commitment_number: HashMap::new(),
+ counterparty_fulfilled_htlcs: HashMap::new(),
prev_holder_signed_commitment_tx: None,
current_holder_commitment_tx: holder_commitment_tx,
&self, holder_commitment_tx: HolderCommitmentTransaction,
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
) -> Result<(), ()> {
- self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs).map_err(|_| ())
+ self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs, &Vec::new()).map_err(|_| ())
}
/// This is used to provide payment preimage(s) out-of-band during startup without updating the
/// `ChannelMonitor`. This is used to determine if an HTLC was removed from the channel prior
/// to the `ChannelManager` having been persisted.
///
- /// This is similar to [`Self::get_pending_outbound_htlcs`] except it includes HTLCs which were
- /// resolved by this `ChannelMonitor`.
- pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
+ /// This is similar to [`Self::get_pending_or_resolved_outbound_htlcs`] except it includes
+ /// HTLCs which were resolved on-chain (i.e. where the final HTLC resolution was done by an
+ /// event from this `ChannelMonitor`).
+ pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
let mut res = HashMap::new();
// Just examine the available counterparty commitment transactions. See docs on
// `fail_unbroadcast_htlcs`, below, for justification.
if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
for &(ref htlc, ref source_option) in latest_outpoints.iter() {
if let &Some(ref source) = source_option {
- res.insert((**source).clone(), htlc.clone());
+ res.insert((**source).clone(), (htlc.clone(),
+ us.counterparty_fulfilled_htlcs.get(&SentHTLCId::from_source(source)).cloned()));
}
}
}
res
}
- /// Gets the set of outbound HTLCs which are pending resolution in this channel.
+ /// Gets the set of outbound HTLCs which are pending resolution in this channel or which were
+ /// resolved with a preimage from our counterparty.
+ ///
/// This is used to reconstruct pending outbound payments on restart in the ChannelManager.
- pub(crate) fn get_pending_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
+ ///
+ /// Currently, the preimage is unused, however if it is present in the relevant internal state
+ /// an HTLC is always included even if it has been resolved.
+ pub(crate) fn get_pending_or_resolved_outbound_htlcs(&self) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
let us = self.inner.lock().unwrap();
// We're only concerned with the confirmation count of HTLC transactions, and don't
// actually care how many confirmations a commitment transaction may or may not have. Thus,
Some(commitment_tx_output_idx) == htlc.transaction_output_index
} else { false }
});
- if !htlc_update_confd {
- res.insert(source.clone(), htlc.clone());
+ let counterparty_resolved_preimage_opt =
+ us.counterparty_fulfilled_htlcs.get(&SentHTLCId::from_source(source)).cloned();
+ if !htlc_update_confd || counterparty_resolved_preimage_opt.is_some() {
+ res.insert(source.clone(), (htlc.clone(), counterparty_resolved_preimage_opt));
}
}
}
}
}
if matched_htlc { continue; }
+ if $self.counterparty_fulfilled_htlcs.get(&SentHTLCId::from_source(source)).is_some() {
+ continue;
+ }
$self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
if entry.height != $commitment_tx_conf_height { return true; }
match entry.event {
// Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill
// events for now-revoked/fulfilled HTLCs.
if let Some(txid) = self.prev_counterparty_commitment_txid.take() {
- for &mut (_, ref mut source) in self.counterparty_claimable_outpoints.get_mut(&txid).unwrap() {
- *source = None;
+ if self.current_counterparty_commitment_txid.unwrap() != txid {
+ let cur_claimables = self.counterparty_claimable_outpoints.get(
+ &self.current_counterparty_commitment_txid.unwrap()).unwrap();
+ for (_, ref source_opt) in self.counterparty_claimable_outpoints.get(&txid).unwrap() {
+ if let Some(source) = source_opt {
+ if !cur_claimables.iter()
+ .any(|(_, cur_source_opt)| cur_source_opt == source_opt)
+ {
+ self.counterparty_fulfilled_htlcs.remove(&SentHTLCId::from_source(source));
+ }
+ }
+ }
+ for &mut (_, ref mut source_opt) in self.counterparty_claimable_outpoints.get_mut(&txid).unwrap() {
+ *source_opt = None;
+ }
+ } else {
+ assert!(cfg!(fuzzing), "Commitment txids are unique outside of fuzzing, where hashes can collide");
}
}
/// is important that any clones of this channel monitor (including remote clones) by kept
/// up-to-date as our holder commitment transaction is updated.
/// Panics if set_on_holder_tx_csv has never been called.
- fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), &'static str> {
- // block for Rust 1.34 compat
- let mut new_holder_commitment_tx = {
- let trusted_tx = holder_commitment_tx.trust();
- let txid = trusted_tx.txid();
- let tx_keys = trusted_tx.keys();
- self.current_holder_commitment_number = trusted_tx.commitment_number();
- HolderSignedTx {
- txid,
- revocation_key: tx_keys.revocation_key,
- a_htlc_key: tx_keys.broadcaster_htlc_key,
- b_htlc_key: tx_keys.countersignatory_htlc_key,
- delayed_payment_key: tx_keys.broadcaster_delayed_payment_key,
- per_commitment_point: tx_keys.per_commitment_point,
- htlc_outputs,
- to_self_value_sat: holder_commitment_tx.to_broadcaster_value_sat(),
- feerate_per_kw: trusted_tx.feerate_per_kw(),
- }
+ fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>, claimed_htlcs: &[(SentHTLCId, PaymentPreimage)]) -> Result<(), &'static str> {
+ let trusted_tx = holder_commitment_tx.trust();
+ let txid = trusted_tx.txid();
+ let tx_keys = trusted_tx.keys();
+ self.current_holder_commitment_number = trusted_tx.commitment_number();
+ let mut new_holder_commitment_tx = HolderSignedTx {
+ txid,
+ revocation_key: tx_keys.revocation_key,
+ a_htlc_key: tx_keys.broadcaster_htlc_key,
+ b_htlc_key: tx_keys.countersignatory_htlc_key,
+ delayed_payment_key: tx_keys.broadcaster_delayed_payment_key,
+ per_commitment_point: tx_keys.per_commitment_point,
+ htlc_outputs,
+ to_self_value_sat: holder_commitment_tx.to_broadcaster_value_sat(),
+ feerate_per_kw: trusted_tx.feerate_per_kw(),
};
self.onchain_tx_handler.provide_latest_holder_tx(holder_commitment_tx);
mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
+ for (claimed_htlc_id, claimed_preimage) in claimed_htlcs {
+ #[cfg(debug_assertions)] {
+ let cur_counterparty_htlcs = self.counterparty_claimable_outpoints.get(
+ &self.current_counterparty_commitment_txid.unwrap()).unwrap();
+ assert!(cur_counterparty_htlcs.iter().any(|(_, source_opt)| {
+ if let Some(source) = source_opt {
+ SentHTLCId::from_source(source) == *claimed_htlc_id
+ } else { false }
+ }));
+ }
+ self.counterparty_fulfilled_htlcs.insert(*claimed_htlc_id, *claimed_preimage);
+ }
if self.holder_tx_signed {
return Err("Latest holder commitment signed has already been signed, update is rejected");
}
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
for update in updates.updates.iter() {
match update {
- ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
+ ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs } => {
log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info");
if self.lockdown_from_offchain { panic!(); }
- if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone()) {
+ if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone(), &claimed_htlcs) {
log_error!(logger, "Providing latest holder commitment transaction failed/was refused:");
log_error!(logger, " {}", e);
ret = Err(());
let mut counterparty_node_id = None;
let mut confirmed_commitment_tx_counterparty_output = None;
let mut spendable_txids_confirmed = Some(Vec::new());
+ let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, vec_type),
(9, counterparty_node_id, option),
(11, confirmed_commitment_tx_counterparty_output, option),
(13, spendable_txids_confirmed, vec_type),
+ (15, counterparty_fulfilled_htlcs, option),
});
Ok((best_block.block_hash(), ChannelMonitor::from_impl(ChannelMonitorImpl {
counterparty_claimable_outpoints,
counterparty_commitment_txn_on_chain,
counterparty_hash_commitment_number,
+ counterparty_fulfilled_htlcs: counterparty_fulfilled_htlcs.unwrap(),
prev_holder_signed_commitment_tx,
current_holder_commitment_tx,
fn test_prune_preimages() {
let secp_ctx = Secp256k1::new();
let logger = Arc::new(TestLogger::new());
- let broadcaster = Arc::new(TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
+ let broadcaster = Arc::new(TestBroadcaster {
+ txn_broadcasted: Mutex::new(Vec::new()),
+ blocks: Arc::new(Mutex::new(Vec::new()))
+ });
let fee_estimator = TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
- let dummy_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
let mut preimages = Vec::new();
{
// Prune with one old state and a holder commitment tx holding a few overlaps with the
// old state.
let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
- let best_block = BestBlock::from_genesis(Network::Testnet);
+ let best_block = BestBlock::from_network(Network::Testnet);
let monitor = ChannelMonitor::new(Secp256k1::new(), keys,
Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &Script::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
HolderCommitmentTransaction::dummy(), best_block, dummy_key);
monitor.provide_latest_holder_commitment_tx(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..10])).unwrap();
- let dummy_txid = dummy_tx.txid();
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger);
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
- monitor.provide_latest_counterparty_commitment_tx(dummy_txid, preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"1").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"2").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
for &(ref preimage, ref hash) in preimages.iter() {
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator);
monitor.provide_payment_preimage(hash, preimage, &broadcaster, &bounded_fee_estimator, &logger);
test_preimages_exist!(&preimages[0..10], monitor);
test_preimages_exist!(&preimages[15..20], monitor);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"3").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
+
// Now provide a further secret, pruning preimages 15-17
secret[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
monitor.provide_secret(281474976710654, secret.clone()).unwrap();
test_preimages_exist!(&preimages[0..10], monitor);
test_preimages_exist!(&preimages[17..20], monitor);
+ monitor.provide_latest_counterparty_commitment_tx(Txid::from_inner(Sha256::hash(b"4").into_inner()),
+ preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
+
// Now update holder commitment tx info, pruning only element 18 as we still care about the
// previous commitment tx's preimages too
monitor.provide_latest_holder_commitment_tx(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..5])).unwrap();
Err(_) => panic!("Your rng is busted"),
}
}
+
+ /// Gets the "node_id" secret key used to sign gossip announcements, decode onion data, etc.
+ pub fn get_node_secret_key(&self) -> SecretKey {
+ self.node_secret
+ }
+
/// Derive an old [`WriteableEcdsaChannelSigner`] containing per-channel secrets based on a key derivation parameters.
pub fn derive_channel_keys(&self, channel_value_satoshis: u64, params: &[u8; 32]) -> InMemorySigner {
let chan_id = u64::from_be_bytes(params[0..8].try_into().unwrap());
pub fn derive_channel_keys(&self, channel_value_satoshis: u64, params: &[u8; 32]) -> InMemorySigner {
self.inner.derive_channel_keys(channel_value_satoshis, params)
}
+
+ /// Gets the "node_id" secret key used to sign gossip announcements, decode onion data, etc.
+ pub fn get_node_secret_key(&self) -> SecretKey {
+ self.inner.get_node_secret_key()
+ }
+
+ /// Gets the "node_id" secret key of the phantom node used to sign invoices, decode the
+ /// last-hop onion data, etc.
+ pub fn get_phantom_node_secret_key(&self) -> SecretKey {
+ self.phantom_secret
+ }
}
// Ensure that EcdsaChannelSigner can have a vtable
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::TxOut;
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::PublicKey;
impl BestBlock {
/// Constructs a `BestBlock` that represents the genesis block at height 0 of the given
/// network.
- pub fn from_genesis(network: Network) -> Self {
+ pub fn from_network(network: Network) -> Self {
BestBlock {
block_hash: genesis_block(network).header.block_hash(),
height: 0,
pub fn height(&self) -> u32 { self.height }
}
-/// An error when accessing the chain via [`Access`].
-#[derive(Clone, Debug)]
-pub enum AccessError {
- /// The requested chain is unknown.
- UnknownChain,
-
- /// The requested transaction doesn't exist or hasn't confirmed.
- UnknownTx,
-}
-
-/// The `Access` trait defines behavior for accessing chain data and state, such as blocks and
-/// UTXOs.
-pub trait Access {
- /// Returns the transaction output of a funding transaction encoded by [`short_channel_id`].
- /// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
- /// is unknown.
- ///
- /// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id
- fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError>;
-}
/// The `Listen` trait is used to notify when blocks have been connected or disconnected from the
/// chain.
use crate::chain::package::PackageSolvingData;
use crate::chain::package::PackageTemplate;
use crate::util::logger::Logger;
-use crate::util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, VecWriter};
+use crate::util::ser::{Readable, ReadableArgs, MaybeReadable, UpgradableRequired, Writer, Writeable, VecWriter};
use crate::io;
use crate::prelude::*;
let mut txid = Txid::all_zeros();
let mut height = 0;
let mut block_hash = None;
- let mut event = None;
+ let mut event = UpgradableRequired(None);
read_tlv_fields!(reader, {
(0, txid, required),
(1, block_hash, option),
(2, height, required),
- (4, event, ignorable),
+ (4, event, upgradable_required),
});
- if let Some(ev) = event {
- Ok(Some(Self { txid, height, block_hash, event: ev }))
- } else {
- Ok(None)
- }
+ Ok(Some(Self { txid, height, block_hash, event: _init_tlv_based_struct_field!(event, upgradable_required) }))
}
}
pub mod util;
pub mod chain;
pub mod ln;
-#[allow(unused)]
-mod offers;
+pub mod offers;
pub mod routing;
pub mod onion_message;
blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
};
let chain_mon = {
- let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
- let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.write(&mut w).unwrap();
- let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
- assert!(new_monitor == *monitor);
+ let new_monitor = {
+ let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+ let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+ assert!(new_monitor == *monitor);
+ new_monitor
+ };
let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
chain_mon
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
- if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Check that even though the persister is returning a InProgress,
// because the update is bogus, ultimately the error that's returned
// should be a PermanentFailure.
assert_eq!(nodes[0].node.list_channels().len(), 1);
if disconnect {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
assert_eq!(nodes[0].node.list_channels().len(), 1);
if disconnect {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
};
if disconnect_count & !disconnect_flags > 0 {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
}
// Now fix monitor updating...
check_added_monitors!(nodes[0], 0);
macro_rules! disconnect_reconnect_peers { () => { {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
// Note that the ordering of the events for different nodes is non-prescriptive, though the
// ordering of the two events that both go to nodes[2] have to stay in the same order.
- let (nodes_0_event, events_3) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events_3);
+ let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
let messages_a = match nodes_0_event {
MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
assert_eq!(node_id, nodes[0].node.get_our_node_id());
_ => panic!("Unexpected event type!"),
};
- let (nodes_2_event, events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
let send_event_b = SendEvent::from_event(nodes_2_event);
assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
let raa = if test_ignore_second_cs {
- let (nodes_2_event, _events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
match nodes_2_event {
MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
assert_eq!(node_id, nodes[2].node.get_our_node_id());
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[2].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[2], 1);
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
check_added_monitors!(nodes[1], 1);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
// Forward a payment for B to claim
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
+ get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ }
+ {
let mut node_1_per_peer_lock;
let mut node_1_peer_state_lock;
- get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
// is still failing to update monitors.
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]);
let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
let channel_id = chan_1.2;
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+ expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 0);
let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
expect_payment_sent!(nodes[0], payment_preimage_1);
// Get the payment forwards, note that they were batched into one commitment update.
- expect_pending_htlcs_forwardable!(nodes[1]);
+ nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 2);
- if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[0] {
+ assert_eq!(events.len(), 3);
+ if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
assert_eq!(payment_hash, payment_hash_1);
assert!(payment_failed_permanently);
} else { panic!("Unexpected event!"); }
- match events[1] {
+ match events[2] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, payment_hash_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+ expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}
// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
// bs_first_raa is not delivered until it is re-generated after reconnect
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.claim_funds(payment_preimage_0);
check_added_monitors!(nodes[0], 1);
- expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
} else {
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
}
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Now reconnect the two
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
+ expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
// New outbound messages should be generated immediately upon a call to
// get_and_clear_pending_msg_events (but not before).
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
if second_fails {
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
- check_closed_broadcast!(nodes[0], true);
+
+ // We always send the `shutdown` response when initiating a shutdown, even if we immediately
+ // close the channel thereafter.
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 3);
+ if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+ if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+ if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
check_added_monitors!(nodes[0], 2);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
- check_closed_broadcast!(nodes[1], true);
+
+ // We always send the `shutdown` response when receiving a shutdown, even if we immediately
+ // close the channel thereafter.
+ let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 3);
+ if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+ if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+ if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
check_added_monitors!(nodes[1], 2);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}
// `claim_funds` results in a ChannelMonitorUpdate.
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// which had some asserts that prevented it from being called twice.
nodes[1].node.claim_funds(payment_preimage_2);
check_added_monitors!(nodes[1], 1);
- expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
check_added_monitors!(nodes[1], 0);
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
- // Complete the first HTLC.
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
+ // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
+ // and get both PaymentClaimed events at once.
+ let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
+ _ => panic!("Unexpected Event: {:?}", events[0]),
+ }
+ match events[1] {
+ Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
+ _ => panic!("Unexpected Event: {:?}", events[1]),
+ }
+
+ assert_eq!(msg_events.len(), 1);
let (update_fulfill_1, commitment_signed_b1, node_id) = {
- match &events[0] {
+ match &msg_events[0] {
&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
assert!(update_add_htlcs.is_empty());
assert_eq!(update_fulfill_htlcs.len(), 1);
use crate::ln::msgs;
use crate::ln::msgs::{DecodeError, OptionalField, DataLossProtect};
use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
#[derive(Clone)]
enum OutboundHTLCOutcome {
+ /// LDK version 0.0.105+ will always fill in the preimage here.
Success(Option<PaymentPreimage>),
Failure(HTLCFailReason),
}
}
/// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch {
+pub enum UpdateFulfillCommitFetch<'a> {
/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
/// previously placed in the holding cell (and has since been removed).
NewClaim {
/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
- monitor_update: ChannelMonitorUpdate,
+ monitor_update: &'a ChannelMonitorUpdate,
/// The value of the HTLC which was claimed, in msat.
htlc_value_msat: u64,
- /// The update_fulfill message and commitment_signed message (if the claim was not placed
- /// in the holding cell).
- msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
},
/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
/// or has been forgotten (presumably previously claimed).
DuplicateClaim {},
}
-/// The return value of `revoke_and_ack` on success, primarily updates to other channels or HTLC
-/// state.
-pub(super) struct RAAUpdates {
- pub commitment_update: Option<msgs::CommitmentUpdate>,
- pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
- pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
- pub finalized_claimed_htlcs: Vec<HTLCSource>,
- pub monitor_update: ChannelMonitorUpdate,
- pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
-}
-
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
pub raa: Option<msgs::RevokeAndACK>,
monitor_pending_channel_ready: bool,
monitor_pending_revoke_and_ack: bool,
monitor_pending_commitment_signed: bool,
+
+ // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
+ // responsible for some of the HTLCs here or not - we don't know whether the update in question
+ // completed or not. We currently ignore these fields entirely when force-closing a channel,
+ // but need to handle this somehow or we run the risk of losing HTLCs!
monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
monitor_pending_finalized_fulfills: Vec<HTLCSource>,
/// The unique identifier used to re-derive the private key material for the channel through
/// [`SignerProvider::derive_channel_signer`].
channel_keys_id: [u8; 32],
+
+ /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
+ /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
+ /// completes we still need to be able to complete the persistence. Thus, we have to keep a
+ /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
+ pending_monitor_updates: Vec<ChannelMonitorUpdate>,
}
#[cfg(any(test, fuzzing))]
channel_type,
channel_keys_id,
+
+ pending_monitor_updates: Vec::new(),
})
}
channel_type,
channel_keys_id,
+
+ pending_monitor_updates: Vec::new(),
};
Ok(chan)
}
}
- pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
- UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
- let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
- Err(e) => return Err((e, monitor_update)),
- Ok(res) => res
- };
- // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ self.pending_monitor_updates.push(monitor_update);
+ UpdateFulfillCommitFetch::NewClaim {
+ monitor_update: self.pending_monitor_updates.last().unwrap(),
+ htlc_value_msat,
+ }
},
- UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
- Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
- UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
+ UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ self.pending_monitor_updates.push(monitor_update);
+ UpdateFulfillCommitFetch::NewClaim {
+ monitor_update: self.pending_monitor_updates.last().unwrap(),
+ htlc_value_msat,
+ }
+ }
+ UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
}
}
pub fn funding_created<SP: Deref, L: Deref>(
&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<(msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
+ ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
where
- SP::Target: SignerProvider,
+ SP::Target: SignerProvider<Signer = Signer>,
L::Target: Logger
{
if self.is_outbound() {
log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
+ let need_channel_ready = self.check_get_channel_ready(0).is_some();
+ self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+
Ok((msgs::FundingSigned {
channel_id: self.channel_id,
signature
- }, channel_monitor, self.check_get_channel_ready(0)))
+ }, channel_monitor))
}
/// Handles a funding_signed message from the remote end.
/// If this call is successful, broadcast the funding transaction (and not before!)
pub fn funding_signed<SP: Deref, L: Deref>(
&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<(ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError>
+ ) -> Result<ChannelMonitor<Signer>, ChannelError>
where
- SP::Target: SignerProvider,
+ SP::Target: SignerProvider<Signer = Signer>,
L::Target: Logger
{
if !self.is_outbound() {
log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
- Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
+ let need_channel_ready = self.check_get_channel_ready(0).is_some();
+ self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ Ok(channel_monitor)
}
/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
// If they haven't ever sent an updated point, the point they send should match
// the current one.
self.counterparty_cur_commitment_point
+ } else if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
+ // If we've advanced the commitment number once, the second commitment point is
+ // at `counterparty_prev_commitment_point`, which is not yet revoked.
+ debug_assert!(self.counterparty_prev_commitment_point.is_some());
+ self.counterparty_prev_commitment_point
} else {
// If they have sent updated points, channel_ready is always supposed to match
// their "first" point, which we re-derive here.
Ok(())
}
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)>
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError>
where L::Target: Logger
{
if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())));
+ return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
}
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())));
+ return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
}
if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
- return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())));
+ return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
}
let funding_script = self.get_funding_redeemscript();
log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
- return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
+ return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
}
bitcoin_tx.txid
};
debug_assert!(!self.is_outbound());
let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000;
if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
- return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())));
+ return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
}
}
#[cfg(any(test, fuzzing))]
}
if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
- return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))));
+ return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
}
// TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update
log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
- return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
+ return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
}
htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
} else {
self.counterparty_funding_pubkey()
);
- let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number - 1, &self.secp_ctx);
self.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
- .map_err(|_| (None, ChannelError::Close("Failed to validate our commitment".to_owned())))?;
- let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 1);
+ .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
// Update state now that we've passed all the can-fail calls...
let mut need_commitment = false;
}
}
- self.latest_monitor_update_id += 1;
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
- commitment_tx: holder_commitment_tx,
- htlc_outputs: htlcs_and_sigs
- }]
- };
-
for htlc in self.pending_inbound_htlcs.iter_mut() {
let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
Some(forward_info.clone())
need_commitment = true;
}
}
+ let mut claimed_htlcs = Vec::new();
for htlc in self.pending_outbound_htlcs.iter_mut() {
if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
// Grab the preimage, if it exists, instead of cloning
let mut reason = OutboundHTLCOutcome::Success(None);
mem::swap(outcome, &mut reason);
+ if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+ // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+ // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+ // have a `Success(None)` reason. In this case we could forget some HTLC
+ // claims, but such an upgrade is unlikely and including claimed HTLCs here
+ // fixes a bug which the user was exposed to on 0.0.104 when they started the
+ // claim anyway.
+ claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+ }
htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
need_commitment = true;
}
}
+ self.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+ commitment_tx: holder_commitment_tx,
+ htlc_outputs: htlcs_and_sigs,
+ claimed_htlcs,
+ }]
+ };
+
self.cur_holder_commitment_transaction_number -= 1;
// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
- // send_commitment_no_status_check() next which will reset this to RAAFirst.
+ // build_commitment_no_status_check() next which will reset this to RAAFirst.
self.resend_order = RAACommitmentOrder::CommitmentFirst;
if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
// the corresponding HTLC status updates so that get_last_commitment_update
// includes the right HTLCs.
self.monitor_pending_commitment_signed = true;
- let (_, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
- // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
}
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
log_bytes!(self.channel_id));
- return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
+ self.pending_monitor_updates.push(monitor_update);
+ return Ok(self.pending_monitor_updates.last().unwrap());
}
- let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
// we'll send one right away when we get the revoke_and_ack when we
// free_holding_cell_htlcs().
- let (msg, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
- // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- Some(msg)
- } else { None };
+ true
+ } else { false };
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
- log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });
-
- Ok((msgs::RevokeAndACK {
- channel_id: self.channel_id,
- per_commitment_secret,
- next_per_commitment_point,
- }, commitment_signed, monitor_update))
+ log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+ self.pending_monitor_updates.push(monitor_update);
+ self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+ return Ok(self.pending_monitor_updates.last().unwrap());
}
/// Public version of the below, checking relevant preconditions first.
/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
/// returns `(None, Vec::new())`.
- pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+ pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ // Only attempt to free the holding cell once the channel is fully open and we're not
+ // blocked on a peer disconnection, an expected counterparty revoke_and_ack, or an
+ // in-flight monitor update.
if self.channel_state >= ChannelState::ChannelReady as u32 &&
(self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
self.free_holding_cell_htlcs(logger)
+ } else { (None, Vec::new()) }
}
/// Frees any pending commitment updates in the holding cell, generating the relevant messages
/// for our counterparty.
- fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+ fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
}
}
if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() {
- return Ok((None, htlcs_to_fail));
+ return (None, htlcs_to_fail);
}
let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() {
self.send_update_fee(feerate, false, logger)
None
};
- let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
- // send_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
// but we want them to be strictly increasing by one, so reset it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
- Ok((Some((msgs::CommitmentUpdate {
- update_add_htlcs,
- update_fulfill_htlcs,
- update_fail_htlcs,
- update_fail_malformed_htlcs: Vec::new(),
- update_fee,
- commitment_signed,
- }, monitor_update)), htlcs_to_fail))
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ self.pending_monitor_updates.push(monitor_update);
+ (Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail)
} else {
- Ok((None, Vec::new()))
+ (None, Vec::new())
}
}
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<RAAUpdates, ChannelError>
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError>
where L::Target: Logger,
{
if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
// When the monitor updating is restored we'll call get_last_commitment_update(),
// which does not update state, but we're definitely now awaiting a remote revoke
// before we can step forward any more, so set it here.
- let (_, mut additional_update) = self.send_commitment_no_status_check(logger)?;
- // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
self.monitor_pending_failures.append(&mut revoked_htlcs);
self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
- return Ok(RAAUpdates {
- commitment_update: None, finalized_claimed_htlcs: Vec::new(),
- accepted_htlcs: Vec::new(), failed_htlcs: Vec::new(),
- monitor_update,
- holding_cell_failed_htlcs: Vec::new()
- });
+ self.pending_monitor_updates.push(monitor_update);
+ return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap()));
}
- match self.free_holding_cell_htlcs(logger)? {
- (Some((mut commitment_update, mut additional_update)), htlcs_to_fail) => {
- commitment_update.update_fail_htlcs.reserve(update_fail_htlcs.len());
- for fail_msg in update_fail_htlcs.drain(..) {
- commitment_update.update_fail_htlcs.push(fail_msg);
- }
- commitment_update.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
- for fail_msg in update_fail_malformed_htlcs.drain(..) {
- commitment_update.update_fail_malformed_htlcs.push(fail_msg);
- }
-
+ match self.free_holding_cell_htlcs(logger) {
+ (Some(_), htlcs_to_fail) => {
+ let mut additional_update = self.pending_monitor_updates.pop().unwrap();
// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- Ok(RAAUpdates {
- commitment_update: Some(commitment_update),
- finalized_claimed_htlcs,
- accepted_htlcs: to_forward_infos,
- failed_htlcs: revoked_htlcs,
- monitor_update,
- holding_cell_failed_htlcs: htlcs_to_fail
- })
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ self.pending_monitor_updates.push(monitor_update);
+ Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
},
(None, htlcs_to_fail) => {
if require_commitment {
- let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+ let mut additional_update = self.build_commitment_no_status_check(logger);
- // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
- Ok(RAAUpdates {
- commitment_update: Some(msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs,
- update_fail_malformed_htlcs,
- update_fee: None,
- commitment_signed
- }),
- finalized_claimed_htlcs,
- accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
- monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
- })
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ self.pending_monitor_updates.push(monitor_update);
+ Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
- Ok(RAAUpdates {
- commitment_update: None,
- finalized_claimed_htlcs,
- accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
- monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
- })
+ self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ self.pending_monitor_updates.push(monitor_update);
+ Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
}
}
}
}
/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
- /// This must be called immediately after the [`chain::Watch`] call which returned
- /// [`ChannelMonitorUpdateStatus::InProgress`].
+ /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+ /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+ /// update completes (potentially immediately).
/// The messages which were generated with the monitor update must *not* have been sent to the
/// remote end, and must instead have been dropped. They will be regenerated when
/// [`Self::monitor_updating_restored`] is called.
///
+ /// [`ChannelManager`]: super::channelmanager::ChannelManager
/// [`chain::Watch`]: crate::chain::Watch
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
- pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+ fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
{
assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+ self.pending_monitor_updates.clear();
// If we're past (or at) the FundingSent stage on an outbound channel, try to
// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
pub fn shutdown<SP: Deref>(
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
- ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
where SP::Target: SignerProvider
{
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
let monitor_update = if update_shutdown_script {
self.latest_monitor_update_id += 1;
- Some(ChannelMonitorUpdate {
+ let monitor_update = ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
- })
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ self.pending_monitor_updates.push(monitor_update);
+ Some(self.pending_monitor_updates.last().unwrap())
} else { None };
let shutdown = if send_shutdown {
Some(msgs::Shutdown {
})
}
- pub fn get_feerate(&self) -> u32 {
+ /// Returns the channel's current feerate, in satoshis per 1000 weight units.
+ pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
self.feerate_per_kw
}
(self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
}
+ /// Returns the oldest [`ChannelMonitorUpdate`] in the queue of pending (not-yet-completed)
+ /// monitor updates, if any.
+ pub fn get_next_monitor_update(&self) -> Option<&ChannelMonitorUpdate> {
+ self.pending_monitor_updates.first()
+ }
+
/// Returns true if funding_created was sent/received.
pub fn is_funding_initiated(&self) -> bool {
self.channel_state >= ChannelState::FundingSent as u32
Ok(Some(res))
}
- /// Only fails in case of bad keys
- fn send_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger {
+ fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
// fail to generate this, we still are at least at a position where upgrading their status
}
self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
- let (res, counterparty_commitment_txid, htlcs) = match self.send_commitment_no_state_update(logger) {
- Ok((res, (counterparty_commitment_tx, mut htlcs))) => {
- // Update state now that we've passed all the can-fail calls...
- let htlcs_no_ref: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
- htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
- (res, counterparty_commitment_tx, htlcs_no_ref)
- },
- Err(e) => return Err(e),
- };
+ let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
+ let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+ htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
if self.announcement_sigs_state == AnnouncementSigsState::MessageSent {
self.announcement_sigs_state = AnnouncementSigsState::Committed;
}]
};
self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
- Ok((res, monitor_update))
+ monitor_update
}
- /// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
- /// when we shouldn't change HTLC/channel state.
- fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+ fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
let counterparty_keys = self.build_remote_transaction_keys();
let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
- let (signature, htlc_signatures);
#[cfg(any(test, fuzzing))]
{
}
}
+ (counterparty_commitment_txid, commitment_stats.htlcs_included)
+ }
+
+ /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
+ /// generation when we shouldn't change HTLC/channel state.
+ fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+ // Get the fee tests from `build_commitment_no_state_update`
+ #[cfg(any(test, fuzzing))]
+ self.build_commitment_no_state_update(logger);
+
+ let counterparty_keys = self.build_remote_transaction_keys();
+ let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+ let (signature, htlc_signatures);
+
{
let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
}
- /// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction
- /// to send to the remote peer in one go.
+ /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
+ /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
///
/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
- /// [`Self::send_htlc`] and [`Self::send_commitment_no_state_update`] for more info.
- pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> where L::Target: Logger {
- match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger)? {
- Some(update_add_htlc) => {
- let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?;
- Ok(Some((update_add_htlc, commitment_signed, monitor_update)))
+ /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
+ pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+ let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+ if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+ match send_res? {
+ Some(_) => {
+ let monitor_update = self.build_commitment_no_status_check(logger);
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ self.pending_monitor_updates.push(monitor_update);
+ Ok(Some(self.pending_monitor_updates.last().unwrap()))
},
None => Ok(None)
}
/// Begins the shutdown process, getting a message for the remote peer and returning all
/// holding cell HTLCs for payment failure.
- pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
- -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+ ///
+ /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
+ /// [`ChannelMonitorUpdate`] will be returned.
+ pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+ target_feerate_sats_per_kw: Option<u32>)
+ -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
where SP::Target: SignerProvider {
for htlc in self.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
}
+ // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
+ // script is set, we just force-close and call it a day.
+ let mut chan_closed = false;
+ if self.channel_state < ChannelState::FundingSent as u32 {
+ chan_closed = true;
+ }
+
let update_shutdown_script = match self.shutdown_scriptpubkey {
Some(_) => false,
- None => {
+ None if !chan_closed => {
let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
if !shutdown_scriptpubkey.is_compatible(their_features) {
return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
true
},
+ None => false,
};
// From here on out, we may not fail!
let monitor_update = if update_shutdown_script {
self.latest_monitor_update_id += 1;
- Some(ChannelMonitorUpdate {
+ let monitor_update = ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
- })
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ self.pending_monitor_updates.push(monitor_update);
+ Some(self.pending_monitor_updates.last().unwrap())
} else { None };
let shutdown = msgs::Shutdown {
channel_id: self.channel_id,
}
});
+ debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+ "we can't both complete shutdown and return a monitor update");
+
Ok((shutdown, monitor_update, dropped_outbound_htlcs))
}
channel_type: channel_type.unwrap(),
channel_keys_id,
+
+ pending_monitor_updates: Vec::new(),
})
}
}
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
payment_secret: None,
- payment_params: None,
}
});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
- let best_block = BestBlock::from_genesis(network);
+ let best_block = BestBlock::from_network(network);
let chain_hash = best_block.block_hash();
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
- let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
+ let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
// Node B --> Node A: funding signed
let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut config = UserConfig::default();
config.channel_handshake_config.announced_channel = false;
- let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 100000, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
+ let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
chan.holder_dust_limit_satoshis = 546;
chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
} }
}
+ // anchors: simple commitment tx with no HTLCs and single anchor
+ test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
+ "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
+ "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
+
// simple commitment tx with no HTLCs
chan.value_to_self_msat = 7000000000;
chan.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
htlc_id: 6,
- amount_msat: 5000000,
+ amount_msat: 5000001,
cltv_expiry: 506,
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
out
});
- test_commitment!("30440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df9",
- "3045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f",
- "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a79f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f014730440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
+ test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
+ "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
+ "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
{ 0,
- "304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c6",
- "304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c",
- "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc34000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050048304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c60148304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
+ "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
+ "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
+ "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
{ 1,
- "304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5",
- "30450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d39",
- "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3401000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5014830450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d3901008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
+ "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
+ "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
+ "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
{ 2,
- "30440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f",
- "30440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e511",
- "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3402000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f014730440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e51101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
+ "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
+ "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
+ "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
} );
- test_commitment_with_anchors!("3045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a68948",
- "3045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e45",
- "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aae9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e4501483045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a6894801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
+ test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
+ "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
+ "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
{ 0,
- "3045022100de8a0649d54fd2e4fc04502c77df9b65da839bbd01854f818f129338b99564b2022009528dbb12c00e874cb2149b1dccc600c69ea5e4042ebf584984fcb029c2d1ec",
- "304402203e7c2622fa3ca29355d37a0ea991bfd7cdb54e6122a1d98d3229d092131f55cd022055263f7f8f32f4cd2f86da63ca106bd7badf0b19ee9833d80cd3b9216eeafd74",
- "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe2902000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100de8a0649d54fd2e4fc04502c77df9b65da839bbd01854f818f129338b99564b2022009528dbb12c00e874cb2149b1dccc600c69ea5e4042ebf584984fcb029c2d1ec8347304402203e7c2622fa3ca29355d37a0ea991bfd7cdb54e6122a1d98d3229d092131f55cd022055263f7f8f32f4cd2f86da63ca106bd7badf0b19ee9833d80cd3b9216eeafd74012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
+ "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
+ "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
+ "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
{ 1,
- "3045022100de6eee8474376ea316d007b33103b4543a46bdf6fda5cbd5902b28a5bc14584f022002989e7b4f7813b77acbe4babcf96d7ffbbe0bf14cba24672364f8e591479edb",
- "3045022100c10688346a9d84647bde7027da07f0d79c6d4129307e4c6c9aea7bdbf25ac3350220269104209793c32c47491698c4e46ebea9c3293a1e4403f9abda39f79698f6b5",
- "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe290300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100de6eee8474376ea316d007b33103b4543a46bdf6fda5cbd5902b28a5bc14584f022002989e7b4f7813b77acbe4babcf96d7ffbbe0bf14cba24672364f8e591479edb83483045022100c10688346a9d84647bde7027da07f0d79c6d4129307e4c6c9aea7bdbf25ac3350220269104209793c32c47491698c4e46ebea9c3293a1e4403f9abda39f79698f6b501008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
+ "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
+ "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
+ "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
{ 2,
- "3045022100fe87da8124ceecbcabb9d599c5339f40277c7c7406514fafbccbf180c7c09cf40220429c7fb6d0fd3705e931ab1219ab0432af38ae4d676008cc1964fbeb8cd35d2e",
- "3044022040ac769a851da31d8e4863e5f94719204f716c82a1ce6d6c52193d9a33b84bce022035df97b078ce80f20dca2109e4c6075af0b50148811452e7290e68b2680fced4",
- "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe290400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fe87da8124ceecbcabb9d599c5339f40277c7c7406514fafbccbf180c7c09cf40220429c7fb6d0fd3705e931ab1219ab0432af38ae4d676008cc1964fbeb8cd35d2e83473044022040ac769a851da31d8e4863e5f94719204f716c82a1ce6d6c52193d9a33b84bce022035df97b078ce80f20dca2109e4c6075af0b50148811452e7290e68b2680fced401008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
+ "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
+ "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
+ "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
} );
}
//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
//! upon reconnect to the relevant peer(s).
//!
-//! It does not manage routing logic (see [`find_route`] for that) nor does it manage constructing
+//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
-//!
-//! [`find_route`]: crate::routing::router::find_route
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner};
+use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::events;
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;
+use alloc::collections::BTreeMap;
+
use crate::io;
use crate::prelude::*;
use core::{cmp, mem};
use core::cell::RefCell;
use crate::io::Read;
-use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock};
+use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;
use core::ops::Deref;
// Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry};
+pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure};
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
Ok(InterceptId(buf))
}
}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
+pub(crate) enum SentHTLCId {
+ PreviousHopData { short_channel_id: u64, htlc_id: u64 },
+ OutboundRoute { session_priv: SecretKey },
+}
+impl SentHTLCId {
+ pub(crate) fn from_source(source: &HTLCSource) -> Self {
+ match source {
+ HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
+ short_channel_id: hop_data.short_channel_id,
+ htlc_id: hop_data.htlc_id,
+ },
+ HTLCSource::OutboundRoute { session_priv, .. } =>
+ Self::OutboundRoute { session_priv: *session_priv },
+ }
+ }
+}
+impl_writeable_tlv_based_enum!(SentHTLCId,
+ (0, PreviousHopData) => {
+ (0, short_channel_id, required),
+ (2, htlc_id, required),
+ },
+ (2, OutboundRoute) => {
+ (0, session_priv, required),
+ };
+);
+
+
/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
#[derive(Clone, PartialEq, Eq)]
first_hop_htlc_msat: u64,
payment_id: PaymentId,
payment_secret: Option<PaymentSecret>,
- /// Note that this is now "deprecated" - we write it for forwards (and read it for
- /// backwards) compatibility reasons, but prefer to use the data in the
- /// [`super::outbound_payment`] module, which stores per-payment data once instead of in
- /// each HTLC.
- payment_params: Option<PaymentParameters>,
},
}
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
0u8.hash(hasher);
prev_hop_data.hash(hasher);
},
- HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payment_params } => {
+ HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat } => {
1u8.hash(hasher);
path.hash(hasher);
session_priv[..].hash(hasher);
payment_id.hash(hasher);
payment_secret.hash(hasher);
first_hop_htlc_msat.hash(hasher);
- payment_params.hash(hasher);
},
}
}
first_hop_htlc_msat: 0,
payment_id: PaymentId([2; 32]),
payment_secret: None,
- payment_params: None,
}
}
}
}
}
#[inline]
- fn ignore_no_close(err: String) -> Self {
- Self {
- err: LightningError {
- err,
- action: msgs::ErrorAction::IgnoreError,
- },
- chan_id: None,
- shutdown_finish: None,
- }
- }
- #[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
Self { err, chan_id: None, shutdown_finish: None }
}
ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
}
+#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
/// Indicates that a payment ultimately destined for us was claimed and we should emit an
/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
EmitEvent { event: events::Event },
}
+impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
+ (0, PaymentClaimed) => { (0, payment_hash, required) },
+ (2, EmitEvent) => { (0, event, upgradable_required) },
+);
+
/// State we hold per-peer.
pub(super) struct PeerState<Signer: ChannelSigner> {
/// `temporary_channel_id` or `channel_id` -> `channel`.
/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
+ /// Map from a specific channel to some action(s) that should be taken when all pending
+ /// [`ChannelMonitorUpdate`]s for the channel complete updating.
+ ///
+ /// Note that because we generally only have one entry here a HashMap is pretty overkill. A
+ /// BTreeMap currently stores more than ten elements per leaf node, so even up to a few
+ /// channels with a peer this will just be one allocation and will amount to a linear list of
+ /// channels to walk, avoiding the whole hashing rigmarole.
+ ///
+ /// Note that the channel may no longer exist. For example, if a channel was closed but we
+ /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
+ /// for a missing channel. While a malicious peer could construct a second channel with the
+ /// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
+ /// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
+ /// duplicates do not occur, so such channels should fail without a monitor update completing.
+ monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+ /// The peer is currently connected (i.e. we've seen a
+ /// [`ChannelMessageHandler::peer_connected`] and no corresponding
+	/// [`ChannelMessageHandler::peer_disconnected`]).
+ is_connected: bool,
+}
+
+impl <Signer: ChannelSigner> PeerState<Signer> {
+ /// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
+ /// If true is passed for `require_disconnected`, the function will return false if we haven't
+	/// disconnected from the node already, i.e. `PeerState::is_connected` is set to `true`.
+ fn ok_to_remove(&self, require_disconnected: bool) -> bool {
+ if require_disconnected && self.is_connected {
+ return false
+ }
+ self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+ }
}
/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
/// offline for a full minute. In order to track this, you must call
/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
///
+/// To avoid trivial DoS issues, ChannelManager limits the number of inbound connections and
+/// inbound channels without confirmed funding transactions. This may result in nodes which we do
+/// not have a channel with being unable to connect to us or open new channels with us if we have
+/// many peers with unfunded channels.
+///
+/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
+/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
+/// never limited. Please ensure you limit the count of such channels yourself.
+///
/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
/// essentially you should default to using a SimpleRefChannelManager, and use a
/// very far in the past, and can only ever be up to two hours in the future.
highest_seen_timestamp: AtomicUsize,
- /// The bulk of our storage will eventually be here (message queues and the like). Currently
- /// the `per_peer_state` stores our channels on a per-peer basis, as well as the peer's latest
- /// features.
+ /// The bulk of our storage. Currently the `per_peer_state` stores our channels on a per-peer
+ /// basis, as well as the peer's latest features.
///
/// If we are connected to a peer we always at least have an entry here, even if no channels
/// are currently open with that peer.
/// [`OutboundPayments::remove_stale_resolved_payments`].
pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
+/// (inbound) ones. The number of peers with unfunded channels is limited separately in
+/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
+const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
+
+/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
+/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
+const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
+
+/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
+/// many peers we reject new (inbound) connections.
+const MAX_NO_CHANNEL_PEERS: usize = 250;
+
/// Information needed for constructing an invoice route hint for this channel.
#[derive(Clone, Debug, PartialEq)]
pub struct CounterpartyForwardingInfo {
/// inbound. This may be zero for inbound channels serialized with LDK versions prior to
/// 0.0.113.
pub user_channel_id: u128,
+ /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
+ /// which is applied to commitment and HTLC transactions.
+ ///
+ /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
+ pub feerate_sat_per_1000_weight: Option<u32>,
/// Our total balance. This is the amount we would get if we close the channel.
/// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
/// amount is not likely to be recoverable on close.
pub fn get_outbound_payment_scid(&self) -> Option<u64> {
self.short_channel_id.or(self.outbound_scid_alias)
}
+
+ fn from_channel<Signer: WriteableEcdsaChannelSigner>(channel: &Channel<Signer>,
+ best_block_height: u32, latest_features: InitFeatures) -> Self {
+
+ let balance = channel.get_available_balances();
+ let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+ channel.get_holder_counterparty_selected_channel_reserve_satoshis();
+ ChannelDetails {
+ channel_id: channel.channel_id(),
+ counterparty: ChannelCounterparty {
+ node_id: channel.get_counterparty_node_id(),
+ features: latest_features,
+ unspendable_punishment_reserve: to_remote_reserve_satoshis,
+ forwarding_info: channel.counterparty_forwarding_info(),
+ // Ensures that we have actually received the `htlc_minimum_msat` value
+ // from the counterparty through the `OpenChannel` or `AcceptChannel`
+ // message (as they are always the first message from the counterparty).
+ // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
+ // default `0` value set by `Channel::new_outbound`.
+ outbound_htlc_minimum_msat: if channel.have_received_message() {
+ Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
+ outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+ },
+ funding_txo: channel.get_funding_txo(),
+ // Note that accept_channel (or open_channel) is always the first message, so
+ // `have_received_message` indicates that type negotiation has completed.
+ channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
+ short_channel_id: channel.get_short_channel_id(),
+ outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
+ inbound_scid_alias: channel.latest_inbound_scid_alias(),
+ channel_value_satoshis: channel.get_value_satoshis(),
+ feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()),
+ unspendable_punishment_reserve: to_self_reserve_satoshis,
+ balance_msat: balance.balance_msat,
+ inbound_capacity_msat: balance.inbound_capacity_msat,
+ outbound_capacity_msat: balance.outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+ user_channel_id: channel.get_user_id(),
+ confirmations_required: channel.minimum_depth(),
+ confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
+ force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
+ is_outbound: channel.is_outbound(),
+ is_channel_ready: channel.is_usable(),
+ is_usable: channel.is_live(),
+ is_public: channel.should_announce(),
+ inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
+ inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
+ config: Some(channel.config()),
+ }
+ }
}
/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
/// made before LDK version 0.0.104.
payment_hash: Option<PaymentHash>,
},
- /// After a payment is explicitly abandoned by calling [`ChannelManager::abandon_payment`], it
- /// is marked as abandoned until an [`Event::PaymentFailed`] is generated. A payment could also
- /// be marked as abandoned if pathfinding fails repeatedly or retries have been exhausted.
+ /// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
+ /// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
+ /// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
Abandoned {
/// Hash of the payment that we have given up trying to send.
payment_hash: PaymentHash,
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
- #[cfg(any(feature = "_test_utils", test))]
- {
- // In testing, ensure there are no deadlocks where the lock is already held upon
- // entering the macro.
- debug_assert!($self.pending_events.try_lock().is_ok());
- debug_assert!($self.per_peer_state.try_write().is_ok());
- }
+ // In testing, ensure there are no deadlocks where the lock is already held upon
+ // entering the macro.
+ debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
let mut msg_events = Vec::with_capacity(2);
let mut peer_state = peer_state_mutex.lock().unwrap();
peer_state.pending_msg_events.append(&mut msg_events);
}
- #[cfg(any(feature = "_test_utils", test))]
- {
- if let None = per_peer_state.get(&$counterparty_node_id) {
- // This shouldn't occour in tests unless an unkown counterparty_node_id
- // has been passed to our message handling functions.
- let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", $counterparty_node_id);
- match err.action {
- msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { ref channel_id, ref data }
- }
- => {
- assert_eq!(*data, expected_error_str);
- if let Some((err_channel_id, _user_channel_id)) = chan_id {
- debug_assert_eq!(*channel_id, err_channel_id);
- }
- }
- _ => debug_assert!(false, "Unexpected event"),
- }
- }
- }
}
// Return error in case higher-API need one
}
}
-macro_rules! handle_monitor_update_res {
- ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
- match $err {
- ChannelMonitorUpdateStatus::PermanentFailure => {
- log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
- update_maps_on_chan_removal!($self, $chan);
- // TODO: $failed_fails is dropped here, which will cause other channels to hit the
- // chain in a confused state! We need to move them into the ChannelMonitor which
- // will be responsible for failing backwards once things confirm on-chain.
- // It's ok that we drop $failed_forwards here - at this point we'd rather they
- // broadcast HTLC-Timeout and pay the associated fees to get their funds back than
- // us bother trying to claim it just to forward on to another peer. If we're
- // splitting hairs we'd prefer to claim payments that were to us, but we haven't
- // given up the preimage yet, so might as well just wait until the payment is
- // retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
- $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
- (res, true)
- },
- ChannelMonitorUpdateStatus::InProgress => {
- log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
- log_bytes!($chan_id[..]),
- if $resend_commitment && $resend_raa {
- match $action_type {
- RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
- RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
- }
- } else if $resend_commitment { "commitment" }
- else if $resend_raa { "RAA" }
- else { "nothing" },
- (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
- (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
- (&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
- if !$resend_commitment {
- debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
- }
- if !$resend_raa {
- debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
- }
- $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
- (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
- },
- ChannelMonitorUpdateStatus::Completed => {
- (Ok(()), false)
- },
- }
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
- let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
- if drop {
- $entry.remove_entry();
- }
- res
- } };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
- debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
- handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
- } };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
- };
- ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
- handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
- };
-}
-
macro_rules! send_channel_ready {
($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
}
}
+// Runs post-`ChannelMonitor`-update completion steps for `$chan`: restores the messages held
+// back while the update was in flight (via `monitor_updating_restored` /
+// `handle_channel_resumption`), optionally queues a `channel_update` alongside a fresh
+// `channel_ready`, replays any `monitor_update_blocked_actions` recorded for the channel,
+// and then forwards, finalizes, or fails backwards the HTLCs that were pending on the
+// update. Consumes (drops) the provided peer-state and per-peer-state locks before calling
+// back into `$self`.
+macro_rules! handle_monitor_update_completion {
+	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+		let mut updates = $chan.monitor_updating_restored(&$self.logger,
+			&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+			$self.best_block.read().unwrap().height());
+		let counterparty_node_id = $chan.get_counterparty_node_id();
+		let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
+			// We only send a channel_update in the case where we are just now sending a
+			// channel_ready and the channel is in a usable state. We may re-send a
+			// channel_update later through the announcement_signatures process for public
+			// channels, but there's no reason not to just inform our counterparty of our fees
+			// now.
+			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
+				Some(events::MessageSendEvent::SendChannelUpdate {
+					node_id: counterparty_node_id,
+					msg,
+				})
+			} else { None }
+		} else { None };
+
+		let update_actions = $peer_state.monitor_update_blocked_actions
+			.remove(&$chan.channel_id()).unwrap_or(Vec::new());
+
+		let htlc_forwards = $self.handle_channel_resumption(
+			&mut $peer_state.pending_msg_events, $chan, updates.raa,
+			updates.commitment_update, updates.order, updates.accepted_htlcs,
+			updates.funding_broadcastable, updates.channel_ready,
+			updates.announcement_sigs);
+		if let Some(upd) = channel_update {
+			$peer_state.pending_msg_events.push(upd);
+		}
+
+		let channel_id = $chan.channel_id();
+		// NOTE(review): both locks are released before the calls below — presumably because
+		// those calls may themselves take per-peer/peer-state locks; confirm against their
+		// implementations.
+		core::mem::drop($peer_state_lock);
+		core::mem::drop($per_peer_state_lock);
+
+		$self.handle_monitor_update_completion_actions(update_actions);
+
+		if let Some(forwards) = htlc_forwards {
+			$self.forward_htlcs(&mut [forwards][..]);
+		}
+		$self.finalize_claims(updates.finalized_claimed_htlcs);
+		for failure in updates.failed_htlcs.drain(..) {
+			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+		}
+	} }
+}
+
+// Handles the result (`$update_res`) of a just-issued `ChannelMonitor` update for `$chan`:
+// - `InProgress`: log and return `Ok(())`, leaving messages held until the update completes.
+// - `PermanentFailure`: force-close the channel, run `$remove` to drop it from its map, and
+//   return an `Err` built from the shutdown result.
+// - `Completed`: if this update is the latest one queued, immediately run
+//   `handle_monitor_update_completion!` (which consumes the provided locks).
+// The second matcher takes a `hash_map` entry and expands to the first with
+// `$remove = $chan_entry.remove_entry()`.
+macro_rules! handle_new_monitor_update {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
+		// any case so that it won't deadlock.
+		debug_assert!($self.id_to_peer.try_lock().is_ok());
+		match $update_res {
+			ChannelMonitorUpdateStatus::InProgress => {
+				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+					log_bytes!($chan.channel_id()[..]));
+				Ok(())
+			},
+			ChannelMonitorUpdateStatus::PermanentFailure => {
+				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
+					log_bytes!($chan.channel_id()[..]));
+				update_maps_on_chan_removal!($self, $chan);
+				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+					"ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
+					$chan.get_user_id(), $chan.force_shutdown(false),
+					$self.get_channel_update_for_broadcast(&$chan).ok()));
+				$remove;
+				res
+			},
+			ChannelMonitorUpdateStatus::Completed => {
+				// Only run completion handling if this update is the latest one issued for the
+				// channel — otherwise further updates are still pending and messages stay held.
+				if ($update_id == 0 || $chan.get_next_monitor_update()
+					.expect("We can't be processing a monitor update if it isn't queued")
+					.update_id == $update_id) &&
+					$chan.get_latest_monitor_update_id() == $update_id
+				{
+					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
+				}
+				Ok(())
+			},
+		}
+	} };
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+	}
+}
+
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(&their_network_key);
- if let None = peer_state_mutex_opt {
- return Err(APIError::APIMisuseError { err: format!("Not connected to node: {}", their_network_key) });
- }
+ let peer_state_mutex = per_peer_state.get(&their_network_key)
+ .ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
- let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state = peer_state_mutex.lock().unwrap();
let channel = {
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let their_features = &peer_state.latest_features;
}
fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
- let mut res = Vec::new();
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
- res.reserve(self.short_to_chan_info.read().unwrap().len());
+ let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
let best_block_height = self.best_block.read().unwrap().height();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
- let balance = channel.get_available_balances();
- let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
- channel.get_holder_counterparty_selected_channel_reserve_satoshis();
- res.push(ChannelDetails {
- channel_id: (*channel_id).clone(),
- counterparty: ChannelCounterparty {
- node_id: channel.get_counterparty_node_id(),
- features: peer_state.latest_features.clone(),
- unspendable_punishment_reserve: to_remote_reserve_satoshis,
- forwarding_info: channel.counterparty_forwarding_info(),
- // Ensures that we have actually received the `htlc_minimum_msat` value
- // from the counterparty through the `OpenChannel` or `AcceptChannel`
- // message (as they are always the first message from the counterparty).
- // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
- // default `0` value set by `Channel::new_outbound`.
- outbound_htlc_minimum_msat: if channel.have_received_message() {
- Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
- outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
- },
- funding_txo: channel.get_funding_txo(),
- // Note that accept_channel (or open_channel) is always the first message, so
- // `have_received_message` indicates that type negotiation has completed.
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
- short_channel_id: channel.get_short_channel_id(),
- outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
- inbound_scid_alias: channel.latest_inbound_scid_alias(),
- channel_value_satoshis: channel.get_value_satoshis(),
- unspendable_punishment_reserve: to_self_reserve_satoshis,
- balance_msat: balance.balance_msat,
- inbound_capacity_msat: balance.inbound_capacity_msat,
- outbound_capacity_msat: balance.outbound_capacity_msat,
- next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
- user_channel_id: channel.get_user_id(),
- confirmations_required: channel.minimum_depth(),
- confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
- force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
- is_outbound: channel.is_outbound(),
- is_channel_ready: channel.is_usable(),
- is_usable: channel.is_live(),
- is_public: channel.should_announce(),
- inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
- inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
- config: Some(channel.config()),
- });
+ for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
+ let details = ChannelDetails::from_channel(channel, best_block_height,
+ peer_state.latest_features.clone());
+ res.push(details);
}
}
}
self.list_channels_with_filter(|_| true)
}
- /// Gets the list of usable channels, in random order. Useful as an argument to [`find_route`]
- /// to ensure non-announced channels are used.
+ /// Gets the list of usable channels, in random order. Useful as an argument to
+ /// [`Router::find_route`] to ensure non-announced channels are used.
///
/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
/// are.
- ///
- /// [`find_route`]: crate::routing::router::find_route
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
// Note we use is_live here instead of usable which leads to somewhat confused
// internal/external nomenclature, but that's ok cause that's probably what the user
self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
+	/// Gets the list of channels we have with a given counterparty, in random order.
+	///
+	/// Returns an empty `Vec` if no per-peer state is currently tracked for
+	/// `counterparty_node_id`.
+	pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
+		let best_block_height = self.best_block.read().unwrap().height();
+		let per_peer_state = self.per_peer_state.read().unwrap();
+
+		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+			let features = &peer_state.latest_features;
+			return peer_state.channel_by_id
+				.iter()
+				.map(|(_, channel)|
+					ChannelDetails::from_channel(channel, best_block_height, features.clone()))
+				.collect();
+		}
+		vec![]
+	}
+
/// Returns in an undefined order recent payments that -- if not fulfilled -- have yet to find a
/// successful path, or have unresolved HTLCs.
///
/// This can be useful for payments that may have been prepared, but ultimately not sent, as a
/// result of a crash. If such a payment exists, is not listed here, and an
- /// [`Event::PaymentSent`] has not been received, you may consider retrying the payment.
+ /// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
///
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
let result: Result<(), _> = loop {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) });
- }
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+ let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let their_features = &peer_state.latest_features;
+ let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
failed_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update {
- let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
- let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, chan_entry);
- break result;
- }
- }
-
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+					// `closing_signed`, which we'll delay if we're pending a monitor update.
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
- msg: shutdown_msg
+ msg: shutdown_msg,
});
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt.take() {
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+ break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ }
+
if chan_entry.get().is_shutdown() {
let channel = remove_channel!(self, chan_entry);
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
-> Result<PublicKey, APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(peer_node_id);
+ let peer_state_mutex = per_peer_state.get(peer_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
let mut chan = {
- if let None = peer_state_mutex_opt {
- return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) });
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
if let Some(peer_msg) = peer_msg {
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
self.finish_force_close_channel(chan.force_shutdown(broadcast));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state = peer_state_mutex.lock().unwrap();
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if let None = peer_state_mutex_opt {
+ if peer_state_mutex_opt.is_none() {
break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
/// public, and thus should be called whenever the result is going to be passed out in a
/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
///
- /// May be called with peer_state already locked!
+ /// Note that in `internal_closing_signed`, this function is called without the `peer_state`
+	/// corresponding to the channel's counterparty locked, as the channel has been removed from
+	/// the
+ /// storage and the `peer_state` lock has been dropped.
fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
if !chan.should_announce() {
return Err(LightningError {
/// is public (only returning an Err if the channel does not yet have an assigned short_id),
/// and thus MUST NOT be called unless the recipient of the resulting message has already
/// provided evidence that they know about the existence of the channel.
- /// May be called with peer_state already locked!
+ ///
+ /// Note that through `internal_closing_signed`, this function is called without the
+	/// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
+ /// removed from the storage and the `peer_state` lock has been dropped.
fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
})
}
- // Only public for testing, this should otherwise never be called direcly
- pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+	/// Test-only wrapper around `send_payment_along_path` which takes the
+	/// `total_consistency_lock` read lock first, as the inner function asserts that
+	/// its caller already holds it.
+	#[cfg(test)]
+	pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+		let _lck = self.total_consistency_lock.read().unwrap();
+		self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+	}
+
+ fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ // The top-level caller should hold the total_consistency_lock read lock.
+ debug_assert!(self.total_consistency_lock.try_write().is_err());
+
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
- .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected"})?;
+ .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
if onion_utils::route_size_insane(&onion_payloads) {
- return Err(APIError::InvalidRoute{err: "Route size too large considering onion data"});
+ return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
}
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
};
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(APIError::InvalidRoute{err: "No peer matching the path's first hop found!" });
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
- match {
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
- }
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
- htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- path: path.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- payment_id,
- payment_secret: payment_secret.clone(),
- payment_params: payment_params.clone(),
- }, onion_packet, &self.logger),
- chan)
- } {
- Some((update_add, commitment_signed, monitor_update)) => {
- let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
- let chan_id = chan.get().channel_id();
- match (update_err,
- handle_monitor_update_res!(self, update_err, chan,
- RAACommitmentOrder::CommitmentFirst, false, true))
- {
- (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
- (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
- (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
- // Note that MonitorUpdateInProgress here indicates (per function
- // docs) that we will resend the commitment update once monitor
- // updating completes. Therefore, we must return an error
- // indicating that it is unsafe to retry the payment wholesale,
- // which we do in the send_payment check for
- // MonitorUpdateInProgress, below.
- return Err(APIError::MonitorUpdateInProgress);
- },
- _ => unreachable!(),
+ if !chan.get().is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
+ }
+ let funding_txo = chan.get().get_funding_txo().unwrap();
+ let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
+ htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ payment_id,
+ payment_secret: payment_secret.clone(),
+ }, onion_packet, &self.logger);
+ match break_chan_entry!(self, send_res, chan) {
+ Some(monitor_update) => {
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
+ if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
+ break Err(e);
+ }
+ if update_res == ChannelMonitorUpdateStatus::InProgress {
+ // Note that MonitorUpdateInProgress here indicates (per function
+ // docs) that we will resend the commitment update once monitor
+ // updating completes. Therefore, we must return an error
+ // indicating that it is unsafe to retry the payment wholesale,
+ // which we do in the send_payment check for
+ // MonitorUpdateInProgress, below.
+ return Err(APIError::MonitorUpdateInProgress);
}
-
- log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
- peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: path.first().unwrap().pubkey,
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: vec![update_add],
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- },
- });
},
None => { },
}
/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
/// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment
/// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
- /// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
- /// [`PaymentId`].
+ /// [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
+ /// second payment with the same [`PaymentId`].
///
/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
/// tracking of payments, including state to indicate once a payment has completed. Because you
/// [`Route`], we assume the invoice had the basic_mpp feature set.
///
/// [`Event::PaymentSent`]: events::Event::PaymentSent
+ /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments
.send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
- pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), PaymentSendFailure> {
+ pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments
.send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params,
- &self.router, self.list_usable_channels(), self.compute_inflight_htlcs(),
+ &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ &self.pending_events,
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
}
- /// Retries a payment along the given [`Route`].
- ///
- /// Errors returned are a superset of those returned from [`send_payment`], so see
- /// [`send_payment`] documentation for more details on errors. This method will also error if the
- /// retry amount puts the payment more than 10% over the payment's total amount, if the payment
- /// for the given `payment_id` cannot be found (likely due to timeout or success), or if
- /// further retries have been disabled with [`abandon_payment`].
- ///
- /// [`send_payment`]: [`ChannelManager::send_payment`]
- /// [`abandon_payment`]: [`ChannelManager::abandon_payment`]
- pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
- }
-
- /// Signals that no further retries for the given payment will occur.
+ /// Signals that no further retries for the given payment should occur. Useful if you have a
+ /// pending outbound payment with retries remaining, but wish to stop retrying the payment before
+ /// retries are exhausted.
///
- /// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
- /// are allowed. If no [`Event::PaymentFailed`] event had been generated before, one will be
- /// generated as soon as there are no remaining pending HTLCs for this payment.
+ /// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
+ /// as there are no remaining pending HTLCs for this payment.
///
/// Note that calling this method does *not* prevent a payment from succeeding. You must still
/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
/// determine the ultimate status of a payment.
///
/// If an [`Event::PaymentFailed`] event is generated and we restart without this
- /// [`ChannelManager`] having been persisted, the payment may still be in the pending state
- /// upon restart. This allows further calls to [`retry_payment`] (and requiring a second call
- /// to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls to
- /// [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+ /// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
///
- /// [`abandon_payment`]: Self::abandon_payment
- /// [`retry_payment`]: Self::retry_payment
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn abandon_payment(&self, payment_id: PaymentId) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- if let Some(payment_failed_ev) = self.pending_outbound_payments.abandon_payment(payment_id) {
- self.pending_events.lock().unwrap().push(payment_failed_ev);
- }
+ self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
}
/// Send a spontaneous payment, which is a payment that does not require the recipient to have
/// [`send_payment`]: Self::send_payment
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
/// payments.
///
/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
- pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, PaymentSendFailure> {
+ pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
retry_strategy, route_params, &self.router, self.list_usable_channels(),
- self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
- &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+ &self.logger, &self.pending_events,
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Send a payment that is probing the given route for liquidity. We calculate the
/// us to easily discern them from real payments.
pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })
- }
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let (chan, msg) = {
let (res, chan) = {
(chan, funding_msg)
},
Err(_) => { return Err(APIError::ChannelUnavailable {
- err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+ err: "Signer refused to sign the initial commitment transaction".to_owned()
}) },
}
};
&self.total_consistency_lock, &self.persistence_notifier,
);
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) });
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for channel_id in channel_ids {
if !peer_state.channel_by_id.contains_key(channel_id) {
let next_hop_scid = {
let peer_state_lock = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = peer_state_lock.get(&next_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.get(next_hop_channel_id) {
- Some(chan) => {
- if !chan.is_usable() {
- return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
- })
- }
- chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
- },
- None => return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
- })
- }
- } else {
- return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) });
+ let peer_state_mutex = peer_state_lock.get(&next_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.get(next_hop_channel_id) {
+ Some(chan) => {
+ if !chan.is_usable() {
+ return Err(APIError::ChannelUnavailable {
+ err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+ })
+ }
+ chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+ },
+ None => return Err(APIError::ChannelUnavailable {
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
+ })
}
};
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if let None = peer_state_mutex_opt {
+ if peer_state_mutex_opt.is_none() {
forwarding_channel_not_found!();
continue;
}
let best_block_height = self.best_block.read().unwrap().height();
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
- || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+ &self.pending_events, &self.logger,
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
if !chan.is_outbound() { return NotifyOption::SkipPersist; }
// If the feerate has decreased by less than half, don't bother
- if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+ if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersist;
}
if !chan.is_live() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersist;
}
log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
chan.queue_update_fee(new_feerate, &self.logger);
NotifyOption::DoPersist
/// the channel.
/// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
/// with the current `ChannelConfig`.
+ /// * Removing peers which have disconnected and no longer have any channels.
///
/// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
/// estimate fetches.
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
+ let mut pending_peers_awaiting_removal = Vec::new();
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
+ let counterparty_node_id = *counterparty_node_id;
peer_state.channel_by_id.retain(|chan_id, chan| {
let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if let Err(e) = chan.timer_check_closing_negotiation_progress() {
let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
- handle_errors.push((Err(err), *counterparty_node_id));
+ handle_errors.push((Err(err), counterparty_node_id));
if needs_close { return false; }
}
true
});
+ if peer_state.ok_to_remove(true) {
+ pending_peers_awaiting_removal.push(counterparty_node_id);
+ }
+ }
+ }
+
+ // When a peer disconnects but still has channels, the peer's `peer_state` entry in the
+ // `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
+ // to that peer are later closed while still being disconnected (i.e. force closed),
+ // we therefore need to remove the peer from `peer_state` separately.
+ // To avoid having to take the `per_peer_state` `write` lock once the channels are
+ // closed, we instead remove such peers awaiting removal here on a timer, to limit the
+ // negative effects on parallelism as much as possible.
+ if pending_peers_awaiting_removal.len() > 0 {
+ let mut per_peer_state = self.per_peer_state.write().unwrap();
+ for counterparty_node_id in pending_peers_awaiting_removal {
+ match per_peer_state.entry(counterparty_node_id) {
+ hash_map::Entry::Occupied(entry) => {
+ // Remove the entry if the peer is still disconnected and we still
+ // have no channels to the peer.
+ let remove_entry = {
+ let peer_state = entry.get().lock().unwrap();
+ peer_state.ok_to_remove(true)
+ };
+ if remove_entry {
+ entry.remove_entry();
+ }
+ },
+ hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
+ }
}
}
/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
/// startup during which time claims that were in-progress at shutdown may be replayed.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
- self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails);
+ self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
}
/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
/// reason for the failure.
///
/// See [`FailureCode`] for valid failure codes.
- pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) {
+ pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
}
/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
- fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+ fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
match failure_code {
- FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16),
- FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16),
+ FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
+ FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
- HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+ HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
}
}
}
/// Fails an HTLC backwards to the sender of it to us.
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
- #[cfg(any(feature = "_test_utils", test))]
- {
- // Ensure that no peer state channel storage lock is not held when calling this
- // function.
- // This ensures that future code doesn't introduce a lock_order requirement for
- // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
- // this function with any `per_peer_state` peer lock aquired would.
- let per_peer_state = self.per_peer_state.read().unwrap();
- for (_, peer) in per_peer_state.iter() {
- debug_assert!(peer.try_lock().is_ok());
- }
+ // Ensure that no peer state channel storage lock is held when calling this function.
+ // This ensures that future code doesn't introduce a lock-order requirement for
+ // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
+ // this function with any `per_peer_state` peer lock acquired would.
+ for (_, peer) in self.per_peer_state.read().unwrap().iter() {
+ debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
- self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, session_priv, payment_id, payment_params, self.probing_cookie_secret, &self.secp_ctx, &self.pending_events, &self.logger);
+ HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
+ if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
+ session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
+ &self.pending_events, &self.logger)
+ { self.push_pending_forwards_ev(); }
},
HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
- let mut forward_event = None;
+ let mut push_forward_ev = false;
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
if forward_htlcs.is_empty() {
- forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
+ push_forward_ev = true;
}
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
}
}
mem::drop(forward_htlcs);
+ if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
- if let Some(time) = forward_event {
- pending_events.push(events::Event::PendingHTLCsForwardable {
- time_forwardable: time
- });
- }
pending_events.push(events::Event::HTLCHandlingFailed {
prev_channel_id: outpoint.to_channel_id(),
failed_next_destination: destination,
let mut expected_amt_msat = None;
let mut valid_mpp = true;
let mut errs = Vec::new();
- let mut per_peer_state = Some(self.per_peer_state.read().unwrap());
+ let per_peer_state = self.per_peer_state.read().unwrap();
for htlc in sources.iter() {
let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
}
};
- if let None = per_peer_state.as_ref().unwrap().get(&counterparty_node_id) {
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
valid_mpp = false;
break;
}
- let peer_state_mutex = per_peer_state.as_ref().unwrap().get(&counterparty_node_id).unwrap();
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let None = peer_state.channel_by_id.get(&chan_id) {
+ if peer_state.channel_by_id.get(&chan_id).is_none() {
valid_mpp = false;
break;
}
claimable_amt_msat += htlc.value;
}
+ mem::drop(per_peer_state);
if sources.is_empty() || expected_amt_msat.is_none() {
- mem::drop(per_peer_state);
self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
return;
}
if claimable_amt_msat != expected_amt_msat.unwrap() {
- mem::drop(per_peer_state);
self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
expected_amt_msat.unwrap(), claimable_amt_msat);
}
if valid_mpp {
for htlc in sources.drain(..) {
- if per_peer_state.is_none() { per_peer_state = Some(self.per_peer_state.read().unwrap()); }
- if let Err((pk, err)) = self.claim_funds_from_hop(per_peer_state.take().unwrap(),
+ if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
{
}
}
}
- mem::drop(per_peer_state);
if !valid_mpp {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
}
fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
- per_peer_state_lock: RwLockReadGuard<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
-> Result<(), (PublicKey, MsgHandleErrInternal)> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
+ let per_peer_state = self.per_peer_state.read().unwrap();
let chan_id = prev_hop.outpoint.to_channel_id();
-
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
None => None
};
- let (found_channel, mut peer_state_opt) = if counterparty_node_id_opt.is_some() && per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).is_some() {
- let peer_mutex = per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).unwrap();
- let peer_state = peer_mutex.lock().unwrap();
- let found_channel = peer_state.channel_by_id.contains_key(&chan_id);
- (found_channel, Some(peer_state))
- } else { (false, None) };
+ let peer_state_opt = counterparty_node_id_opt.as_ref().map(
+ |counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
+ |peer_mutex| peer_mutex.lock().unwrap()
+ )
+ ).unwrap_or(None);
- if found_channel {
- let peer_state = &mut *peer_state_opt.as_mut().unwrap();
+ if peer_state_opt.is_some() {
+ let mut peer_state_lock = peer_state_opt.unwrap();
+ let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
let counterparty_node_id = chan.get().get_counterparty_node_id();
- match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
- Ok(msgs_monitor_option) => {
- if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
- "Failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
- self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
- return Err((counterparty_node_id, err));
- }
- }
- if let Some((msg, commitment_signed)) = msgs {
- log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
- log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
- peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id,
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: vec![msg],
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- }
- });
- }
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
- self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
- Ok(())
- } else {
- Ok(())
- }
- },
- Err((e, monitor_update)) => {
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- // TODO: This needs to be handled somehow - if we receive a monitor update
- // with a preimage we *must* somehow manage to propagate it to the upstream
- // channel, or we must have an ability to receive the same update and try
- // again on restart.
- log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
- "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
- payment_preimage, e);
- },
- }
- let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
- if drop {
- chan.remove_entry();
- }
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
- self.handle_monitor_update_completion_actions(completion_action(None));
- Err((counterparty_node_id, res))
- },
+ let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+ if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+ if let Some(action) = completion_action(Some(htlc_value_msat)) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ log_bytes!(chan_id), action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ }
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+ let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan);
+ if let Err(e) = res {
+ // TODO: This is a *critical* error - we probably updated the outbound edge
+ // of the HTLC's monitor with a preimage. We should retry this monitor
+ // update over and over again until morale improves.
+ log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+ return Err((counterparty_node_id, e));
+ }
}
- } else {
- // We've held the peer_state mutex since finding the channel and setting
- // found_channel to true, so the channel can't have been dropped.
- unreachable!()
- }
- } else {
- let preimage_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage,
- }],
- };
- // We update the ChannelMonitor on the backward link, after
- // receiving an `update_fulfill_htlc` from the forward link.
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
- if update_res != ChannelMonitorUpdateStatus::Completed {
- // TODO: This needs to be handled somehow - if we receive a monitor update
- // with a preimage we *must* somehow manage to propagate it to the upstream
- // channel, or we must have an ability to receive the same event and try
- // again on restart.
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, update_res);
+ return Ok(());
}
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
- // Note that we do process the completion action here. This totally could be a
- // duplicate claim, but we have no way of knowing without interrogating the
- // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
- // generally always allowed to be duplicative (and it's specifically noted in
- // `PaymentForwarded`).
- self.handle_monitor_update_completion_actions(completion_action(None));
- Ok(())
}
+ let preimage_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage,
+ }],
+ };
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an `update_fulfill_htlc` from the forward link.
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ // TODO: This needs to be handled somehow - if we receive a monitor update
+ // with a preimage we *must* somehow manage to propagate it to the upstream
+ // channel, or we must have an ability to receive the same event and try
+ // again on restart.
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, update_res);
+ }
+ // Note that we do process the completion action here. This totally could be a
+ // duplicate claim, but we have no way of knowing without interrogating the
+ // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+ // generally always allowed to be duplicative (and it's specifically noted in
+ // `PaymentForwarded`).
+ self.handle_monitor_update_completion_actions(completion_action(None));
+ Ok(())
}
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(self.per_peer_state.read().unwrap(), hop_data, payment_preimage,
+ let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat| {
if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+ log_bytes!(channel.channel_id()),
+ if raa.is_some() { "an" } else { "no" },
+ if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+ if funding_broadcastable.is_some() { "" } else { "not " },
+ if channel_ready.is_some() { "sending" } else { "without" },
+ if announcement_sigs.is_some() { "sending" } else { "without" });
+
let mut htlc_forwards = None;
let counterparty_node_id = channel.get_counterparty_node_id();
}
fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
- let htlc_forwards;
- let (mut pending_failures, finalized_claims, counterparty_node_id) = {
- let counterparty_node_id = match counterparty_node_id {
- Some(cp_id) => cp_id.clone(),
- None => {
- // TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the id_to_peer map should be removed.
- let id_to_peer = self.id_to_peer.lock().unwrap();
- match id_to_peer.get(&funding_txo.to_channel_id()) {
- Some(cp_id) => cp_id.clone(),
- None => return,
- }
- }
- };
- let per_peer_state = self.per_peer_state.read().unwrap();
- let mut peer_state_lock;
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if let None = peer_state_mutex_opt { return }
- peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let mut channel = {
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
- hash_map::Entry::Occupied(chan) => chan,
- hash_map::Entry::Vacant(_) => return,
+ let counterparty_node_id = match counterparty_node_id {
+ Some(cp_id) => cp_id.clone(),
+ None => {
+ // TODO: Once we can rely on the counterparty_node_id from the
+ // monitor event, this and the id_to_peer map should be removed.
+ let id_to_peer = self.id_to_peer.lock().unwrap();
+ match id_to_peer.get(&funding_txo.to_channel_id()) {
+ Some(cp_id) => cp_id.clone(),
+ None => return,
}
- };
- if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
- return;
}
-
- let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height());
- let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
- // We only send a channel_update in the case where we are just now sending a
- // channel_ready and the channel is in a usable state. We may re-send a
- // channel_update later through the announcement_signatures process for public
- // channels, but there's no reason not to just inform our counterparty of our fees
- // now.
- if let Ok(msg) = self.get_channel_update_for_unicast(channel.get()) {
- Some(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get().get_counterparty_node_id(),
- msg,
- })
- } else { None }
- } else { None };
- htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
- if let Some(upd) = channel_update {
- peer_state.pending_msg_events.push(upd);
+ };
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let mut peer_state_lock;
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return }
+ peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let mut channel = {
+ match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
+ hash_map::Entry::Occupied(chan) => chan,
+ hash_map::Entry::Vacant(_) => return,
}
-
- (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
- if let Some(forwards) = htlc_forwards {
- self.forward_htlcs(&mut [forwards][..]);
- }
- self.finalize_claims(finalized_claims);
- for failure in pending_failures.drain(..) {
- let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
- self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+ log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
+ highest_applied_update_id, channel.get().get_latest_monitor_update_id());
+ if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
+ return;
}
+ handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
}
/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) });
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let is_only_peer_channel = peer_state.channel_by_id.len() == 1;
match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
hash_map::Entry::Occupied(mut channel) => {
if !channel.get().inbound_is_awaiting_accept() {
peer_state.pending_msg_events.push(send_msg_err_event);
let _ = remove_channel!(self, channel);
return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+ } else {
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+ // channels per-peer we can accept channels from a peer with existing ones.
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.get().get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let _ = remove_channel!(self, channel);
+ return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+ }
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
Ok(())
}
- fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
- if msg.chain_hash != self.genesis_hash {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
- }
+ /// Gets the number of peers which match the given filter and do not have any funded, outbound,
+ /// or 0-conf channels.
+ ///
+ /// The filter is called for each peer and provided with the number of unfunded, inbound, and
+ /// non-0-conf channels we have with the peer.
+ fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
+ where Filter: Fn(&PeerState<<SP::Target as SignerProvider>::Signer>) -> bool {
+ let mut peers_without_funded_channels = 0;
+ let best_block_height = self.best_block.read().unwrap().height();
+ {
+ let peer_state_lock = self.per_peer_state.read().unwrap();
+ for (_, peer_mtx) in peer_state_lock.iter() {
+ let peer = peer_mtx.lock().unwrap();
+ if !maybe_count_peer(&*peer) { continue; }
+ let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
+ if num_unfunded_channels == peer.channel_by_id.len() {
+ peers_without_funded_channels += 1;
+ }
+ }
+ }
+ return peers_without_funded_channels;
+ }
+
+ fn unfunded_channel_count(
+ peer: &PeerState<<SP::Target as SignerProvider>::Signer>, best_block_height: u32
+ ) -> usize {
+ let mut num_unfunded_channels = 0;
+ for (_, chan) in peer.channel_by_id.iter() {
+ if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
+ chan.get_funding_tx_confirmations(best_block_height) == 0
+ {
+ num_unfunded_channels += 1;
+ }
+ }
+ num_unfunded_channels
+ }
+
+ fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ if msg.chain_hash != self.genesis_hash {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
+ }
if !self.default_configuration.accept_inbound_channels {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
let mut random_bytes = [0u8; 16];
random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
let user_channel_id = u128::from_be_bytes(random_bytes);
-
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+
+ // Get the number of peers with channels, but without funded ones. We don't care too much
+ // about peers that never open a channel, so we filter by peers that have at least one
+ // channel, and then limit the number of those with unfunded channels.
+ let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty());
+
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+ // channels per-peer we can accept channels from a peer with existing ones.
+ if peer_state.channel_by_id.is_empty() &&
+ channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
+ !self.default_configuration.manually_accept_inbound_channels
+ {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
+ msg.temporary_channel_id.clone()));
+ }
+
+ let best_block_height = self.best_block.read().unwrap().height();
+ if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
+ msg.temporary_channel_id.clone()));
+ }
+
let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
- counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration,
- self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
+ counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
+ &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
{
Err(e) => {
self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
let (value, output_script, user_id) = {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id))
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
hash_map::Entry::Occupied(mut chan) => {
}
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
+ let best_block = *self.best_block.read().unwrap();
+
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id))
- }
- let ((funding_msg, monitor, mut channel_ready), mut chan) = {
- let best_block = *self.best_block.read().unwrap();
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+ })?;
+
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let ((funding_msg, monitor), chan) =
match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
hash_map::Entry::Occupied(mut chan) => {
(try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
- }
- };
- // Because we have exclusive ownership of the channel here we can release the peer_state
- // lock before watch_channel
- match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
- ChannelMonitorUpdateStatus::Completed => {},
- ChannelMonitorUpdateStatus::PermanentFailure => {
- // Note that we reply with the new channel_id in error messages if we gave up on the
- // channel, not the temporary_channel_id. This is compatible with ourselves, but the
- // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
- // any messages referencing a previously-closed channel anyway.
- // We do not propagate the monitor update to the user as it would be for a monitor
- // that we didn't manage to store (and that we don't care about - we don't respond
- // with the funding_signed so the channel can never go on chain).
- let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
- assert!(failed_htlcs.is_empty());
- return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
- },
- ChannelMonitorUpdateStatus::InProgress => {
- // There's no problem signing a counterparty's funding transaction if our monitor
- // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
- // accepted payment from yet. We do, however, need to wait to send our channel_ready
- // until we have persisted our monitor.
- chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
- channel_ready = None; // Don't send the channel_ready now
- },
- }
- // It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
- // peer exists, despite the inner PeerState potentially having no channels after removing
- // the channel above.
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
+ };
+
match peer_state.channel_by_id.entry(funding_msg.channel_id) {
hash_map::Entry::Occupied(_) => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
+ Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
},
hash_map::Entry::Vacant(e) => {
- let mut id_to_peer = self.id_to_peer.lock().unwrap();
- match id_to_peer.entry(chan.channel_id()) {
+ match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
hash_map::Entry::Occupied(_) => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
i_e.insert(chan.get_counterparty_node_id());
}
}
+
+ // There's no problem signing a counterparty's funding transaction if our monitor
+ // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+ // accepted payment from yet. We do, however, need to wait to send our channel_ready
+ // until we have persisted our monitor.
+ let new_channel_id = funding_msg.channel_id;
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
node_id: counterparty_node_id.clone(),
msg: funding_msg,
});
- if let Some(msg) = channel_ready {
- send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
+
+ let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+
+ let chan = e.insert(chan);
+ let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+ per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+
+ // Note that we reply with the new channel_id in error messages if we gave up on the
+ // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+ // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+ // any messages referencing a previously-closed channel anyway.
+ // We do not propagate the monitor update to the user as it would be for a monitor
+ // that we didn't manage to store (and that we don't care about - we don't respond
+ // with the funding_signed so the channel can never go on chain).
+ if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+ res.0 = None;
}
- e.insert(chan);
+ res
}
}
- Ok(())
}
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
- let funding_tx = {
- let best_block = *self.best_block.read().unwrap();
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
- }
+ let best_block = *self.best_block.read().unwrap();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) {
- Ok(update) => update,
- Err(e) => try_chan_entry!(self, Err(e), chan),
- };
- match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
- if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
- }
- }
- return res
- },
- }
- if let Some(msg) = channel_ready {
- send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg);
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ let monitor = try_chan_entry!(self,
+ chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
+ let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+ let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
}
- funding_tx
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
- }
- };
- log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
- self.tx_broadcaster.broadcast_transaction(&funding_tx);
- Ok(())
+ }
+ res
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+ }
}
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
}
- let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+ let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+ chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
dropped_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update {
- let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
- let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, chan_entry);
- break result;
- }
- }
-
if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `closing_signed`, which we'll delay if we're pending a monitor update.
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
msg,
});
}
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+ break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ }
break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
- let _ = handle_error!(self, result, *counterparty_node_id);
- Ok(())
+ result
}
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
- }
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
let (tx, chan_option) = {
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
}
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
let pending_forward_info = self.decode_update_add_htlc_onion(msg);
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let (htlc_source, forwarded_htlc_value) = {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let (revoke_and_ack, commitment_signed, monitor_update) =
- match chan.get_mut().commitment_signed(&msg, &self.logger) {
- Err((None, e)) => try_chan_entry!(self, Err(e), chan),
- Err((Some(update), e)) => {
- assert!(chan.get().is_awaiting_monitor_update());
- let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update);
- try_chan_entry!(self, Err(e), chan);
- unreachable!();
- },
- Ok(res) => res
- };
- let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
- if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
- return Err(e);
- }
-
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: counterparty_node_id.clone(),
- msg: revoke_and_ack,
- });
- if let Some(msg) = commitment_signed {
- peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id.clone(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed: msg,
- },
- });
- }
- Ok(())
+ let funding_txo = chan.get().get_funding_txo();
+ let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
#[inline]
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
- let mut forward_event = None;
+ let mut push_forward_event = false;
let mut new_intercept_events = Vec::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
// We don't want to generate a PendingHTLCsForwardable event if only intercepted
// payments are being processed.
if forward_htlcs_empty {
- forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
+ push_forward_event = true;
}
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_intercept_events);
}
+ if push_forward_event { self.push_pending_forwards_ev() }
+ }
+ }
- match forward_event {
- Some(time) => {
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PendingHTLCsForwardable {
- time_forwardable: time
- });
- }
- None => {},
- }
+	// We only want to push a PendingHTLCsForwardable event if no others are queued.
+	fn push_pending_forwards_ev(&self) {
+		let mut pending_events = self.pending_events.lock().unwrap();
+		// `any` + `matches!` replaces the non-idiomatic
+		// `.find(|ev| if let … { true } else { false }).is_some()` (clippy::search_is_some).
+		let forward_ev_exists = pending_events.iter()
+			.any(|ev| matches!(ev, events::Event::PendingHTLCsForwardable { .. }));
+		if !forward_ev_exists {
+			pending_events.push(events::Event::PendingHTLCsForwardable {
+				time_forwardable:
+					Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
			});
		}
	}
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
- let mut htlcs_to_fail = Vec::new();
- let res = loop {
+ let (htlcs_to_fail, res) = {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ }).map(|mtx| mtx.lock().unwrap())?;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
- let raa_updates = break_chan_entry!(self,
- chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
- htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
- let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update);
- if was_paused_for_mon_update {
- assert!(update_res != ChannelMonitorUpdateStatus::Completed);
- assert!(raa_updates.commitment_update.is_none());
- assert!(raa_updates.accepted_htlcs.is_empty());
- assert!(raa_updates.failed_htlcs.is_empty());
- assert!(raa_updates.finalized_claimed_htlcs.is_empty());
- break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
- }
- if update_res != ChannelMonitorUpdateStatus::Completed {
- if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
- RAACommitmentOrder::CommitmentFirst, false,
- raa_updates.commitment_update.is_some(), false,
- raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs) {
- break Err(e);
- } else { unreachable!(); }
- }
- if let Some(updates) = raa_updates.commitment_update {
- peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id.clone(),
- updates,
- });
- }
- break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs,
- chan.get().get_short_channel_id()
- .unwrap_or(chan.get().outbound_scid_alias()),
- chan.get().get_funding_txo().unwrap(),
- chan.get().get_user_id()))
+ let funding_txo = chan.get().get_funding_txo();
+ let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ let res = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan);
+ (htlcs_to_fail, res)
},
- hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
- match res {
- Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
- short_channel_id, channel_outpoint, user_channel_id)) =>
- {
- for failure in pending_failures.drain(..) {
- let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
- self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
- }
- self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]);
- self.finalize_claims(finalized_claim_htlcs);
- Ok(())
- },
- Err(e) => Err(e)
- }
+ res
}
fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
), chan),
// Note that announcement_signatures fails if the channel cannot be announced,
// so get_channel_update_for_broadcast will never fail by the time we get here.
- update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
+ update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
});
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
- if let None = peer_state_mutex_opt {
+ if peer_state_mutex_opt.is_none() {
return Ok(NotifyOption::SkipPersist)
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let need_lnd_workaround = {
let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
/// Process pending events from the `chain::Watch`, returning whether any events were processed.
fn process_pending_monitor_events(&self) -> bool {
+ debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
+
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
/// update events as a separate process method here.
#[cfg(fuzzing)]
pub fn process_monitor_events(&self) {
- self.process_pending_monitor_events();
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ if self.process_pending_monitor_events() {
+ NotifyOption::DoPersist
+ } else {
+ NotifyOption::SkipPersist
+ }
+ });
}
/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
let mut handle_errors = Vec::new();
- {
- let per_peer_state = self.per_peer_state.read().unwrap();
+ // Walk our list of channels and find any that need to update. Note that when we do find an
+ // update, if it includes actions that must be taken afterwards, we have to drop the
+ // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
+ // manage to go through all our peers without finding a single channel to update.
+ 'peer_loop: loop {
+ let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|channel_id, chan| {
- match chan.maybe_free_holding_cell_htlcs(&self.logger) {
- Ok((commitment_opt, holding_cell_failed_htlcs)) => {
- if !holding_cell_failed_htlcs.is_empty() {
- failed_htlcs.push((
- holding_cell_failed_htlcs,
- *channel_id,
- chan.get_counterparty_node_id()
- ));
- }
- if let Some((commitment_update, monitor_update)) = commitment_opt {
- match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_counterparty_node_id(),
- updates: commitment_update,
- });
- },
- e => {
- has_monitor_update = true;
- let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
- handle_errors.push((chan.get_counterparty_node_id(), res));
- if close_channel { return false; }
- },
- }
+ 'chan_loop: loop {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+ for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+ let counterparty_node_id = chan.get_counterparty_node_id();
+ let funding_txo = chan.get_funding_txo();
+ let (monitor_opt, holding_cell_failed_htlcs) =
+ chan.maybe_free_holding_cell_htlcs(&self.logger);
+ if !holding_cell_failed_htlcs.is_empty() {
+ failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+ }
+ if let Some(monitor_update) = monitor_opt {
+ has_monitor_update = true;
+
+ let update_res = self.chain_monitor.update_channel(
+ funding_txo.expect("channel is live"), monitor_update);
+ let update_id = monitor_update.update_id;
+ let channel_id: [u8; 32] = *channel_id;
+ let res = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+ peer_state.channel_by_id.remove(&channel_id));
+ if res.is_err() {
+ handle_errors.push((counterparty_node_id, res));
}
- true
- },
- Err(e) => {
- let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
- handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
- // ChannelClosed event is generated by handle_error for us
- !close_channel
+ continue 'peer_loop;
}
}
- });
+ break 'chan_loop;
+ }
}
+ break 'peer_loop;
}
let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
/// [`PaymentHash`] and [`PaymentPreimage`] for you.
///
/// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
- /// will have the [`PaymentClaimable::payment_preimage`] field filled in. That should then be
+ /// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with
+ /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
/// passed directly to [`claim_funds`].
///
/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
///
/// [`claim_funds`]: Self::claim_funds
/// [`PaymentClaimable`]: events::Event::PaymentClaimable
- /// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage
+ /// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
+ /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
+ /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if peer_state.pending_msg_events.len() > 0 {
- let mut peer_pending_events = Vec::new();
- mem::swap(&mut peer_pending_events, &mut peer_state.pending_msg_events);
- pending_events.append(&mut peer_pending_events);
+ pending_events.append(&mut peer_state.pending_msg_events);
}
}
msg: announcement,
// Note that announcement_signatures fails if the channel cannot be announced,
// so get_channel_update_for_broadcast will never fail by the time we get here.
- update_msg: self.get_channel_update_for_broadcast(channel).unwrap(),
+ update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
});
}
}
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
}
- fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
+ fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut failed_channels = Vec::new();
- let mut no_channels_remain = true;
let mut per_peer_state = self.per_peer_state.write().unwrap();
- {
- log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
- log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+ let remove_peer = {
+ log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
+ log_pubkey!(counterparty_node_id));
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
update_maps_on_chan_removal!(self, chan);
self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
return false;
- } else {
- no_channels_remain = false;
}
true
});
&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { .. } => false,
&events::MessageSendEvent::HandleError { .. } => false,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
}
});
- }
- }
- if no_channels_remain {
+ debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
+ peer_state.is_connected = false;
+ peer_state.ok_to_remove(true)
+ } else { debug_assert!(false, "Unconnected peer disconnected"); true }
+ };
+ if remove_peer {
per_peer_state.remove(counterparty_node_id);
}
mem::drop(per_peer_state);
}
}
- fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+ fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
if !init_msg.features.supports_static_remote_key() {
- log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
+ log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
return Err(());
}
- log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
-
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ // If we have too many peers connected which don't have funded channels, disconnect the
+ // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
+ // unfunded channels taking up space in memory for disconnected peers, we still let new
+ // peers connect, but we'll reject new channels from them.
+ let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+ let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+
{
let mut peer_state_lock = self.per_peer_state.write().unwrap();
match peer_state_lock.entry(counterparty_node_id.clone()) {
hash_map::Entry::Vacant(e) => {
+ if inbound_peer_limited {
+ return Err(());
+ }
e.insert(Mutex::new(PeerState {
channel_by_id: HashMap::new(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
+ monitor_update_blocked_actions: BTreeMap::new(),
+ is_connected: true,
}));
},
hash_map::Entry::Occupied(e) => {
- e.get().lock().unwrap().latest_features = init_msg.features.clone();
+ let mut peer_state = e.get().lock().unwrap();
+ peer_state.latest_features = init_msg.features.clone();
+
+ let best_block_height = self.best_block.read().unwrap().height();
+ if inbound_peer_limited &&
+ Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+ peer_state.channel_by_id.len()
+ {
+ return Err(());
+ }
+
+ debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+ peer_state.is_connected = true;
},
}
}
- let per_peer_state = self.per_peer_state.read().unwrap();
+ log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+ let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel_ids: Vec<[u8; 32]> = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt { return; }
+ if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
peer_state.channel_by_id.keys().cloned().collect()
// First check if we can advance the channel type and try again.
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if let None = peer_state_mutex_opt { return; }
+ if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
(33, self.inbound_htlc_minimum_msat, option),
(35, self.inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
+ (39, self.feerate_sat_per_1000_weight, option),
});
Ok(())
}
(33, inbound_htlc_minimum_msat, option),
(35, inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
+ (39, feerate_sat_per_1000_weight, option),
});
// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
is_public: is_public.0.unwrap(),
inbound_htlc_minimum_msat,
inbound_htlc_maximum_msat,
+ feerate_sat_per_1000_weight,
})
}
}
impl Readable for ClaimableHTLC {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- let mut prev_hop = crate::util::ser::OptionDeserWrapper(None);
+ let mut prev_hop = crate::util::ser::RequiredWrapper(None);
let mut value = 0;
let mut payment_data: Option<msgs::FinalOnionHopData> = None;
let mut cltv_expiry = 0;
let id: u8 = Readable::read(reader)?;
match id {
0 => {
- let mut session_priv: crate::util::ser::OptionDeserWrapper<SecretKey> = crate::util::ser::OptionDeserWrapper(None);
+ let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
- let mut path = Some(Vec::new());
+ let mut path: Option<Vec<RouteHop>> = Some(Vec::new());
let mut payment_id = None;
let mut payment_secret = None;
- let mut payment_params = None;
+ let mut payment_params: Option<PaymentParameters> = None;
read_tlv_fields!(reader, {
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
(4, path, vec_type),
- (5, payment_params, option),
+ (5, payment_params, (option: ReadableArgs, 0)),
});
if payment_id.is_none() {
// For backwards compat, if there was no payment_id written, use the session_priv bytes
// instead.
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
}
+ if path.is_none() || path.as_ref().unwrap().is_empty() {
+ return Err(DecodeError::InvalidValue);
+ }
+ let path = path.unwrap();
+ if let Some(params) = payment_params.as_mut() {
+ if params.final_cltv_expiry_delta == 0 {
+ params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta;
+ }
+ }
Ok(HTLCSource::OutboundRoute {
session_priv: session_priv.0.unwrap(),
first_hop_htlc_msat,
- path: path.unwrap(),
+ path,
payment_id: payment_id.unwrap(),
payment_secret,
- payment_params,
})
}
1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
match self {
- HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => {
+ HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret } => {
0u8.write(writer)?;
let payment_id_opt = Some(payment_id);
write_tlv_fields!(writer, {
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
(4, *path, vec_type),
- (5, payment_params, option),
+ (5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
});
}
HTLCSource::PreviousHopData(ref field) => {
best_block.block_hash().write(writer)?;
}
+ let mut serializable_peer_count: u64 = 0;
{
let per_peer_state = self.per_peer_state.read().unwrap();
let mut unfunded_channels = 0;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ if !peer_state.ok_to_remove(false) {
+ serializable_peer_count += 1;
+ }
number_of_channels += peer_state.channel_by_id.len();
for (_, channel) in peer_state.channel_by_id.iter() {
if !channel.is_funding_initiated() {
htlc_purposes.push(purpose);
}
- (per_peer_state.len() as u64).write(writer)?;
- for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
- peer_pubkey.write(writer)?;
- let peer_state = peer_state_mutex.lock().unwrap();
- peer_state.latest_features.write(writer)?;
+ let mut monitor_update_blocked_actions_per_peer = None;
+ let mut peer_states = Vec::new();
+ for (_, peer_state_mutex) in per_peer_state.iter() {
+ // Because we're holding the owning `per_peer_state` write lock here there's no chance
+ // of a lockorder violation deadlock - no other thread can be holding any
+ // per_peer_state lock at all.
+ peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
+ }
+
+ (serializable_peer_count).write(writer)?;
+ for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
+ // Peers which we have no channels to should be dropped once disconnected. As we
+ // disconnect all peers when shutting down and serializing the ChannelManager, we
+ // consider all peers as disconnected here. There's therefore no need to write peers with
+ // no channels.
+ if !peer_state.ok_to_remove(false) {
+ peer_pubkey.write(writer)?;
+ peer_state.latest_features.write(writer)?;
+ if !peer_state.monitor_update_blocked_actions.is_empty() {
+ monitor_update_blocked_actions_per_peer
+ .get_or_insert_with(Vec::new)
+ .push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
+ }
+ }
}
let events = self.pending_events.lock().unwrap();
// LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
// map. Thus, if there are no entries we skip writing a TLV for it.
pending_claiming_payments = None;
- } else {
- debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR");
}
write_tlv_fields!(writer, {
(3, pending_outbound_payments, required),
(4, pending_claiming_payments, option),
(5, self.our_network_pubkey, required),
+ (6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
(9, htlc_purposes, vec_type),
(11, self.probing_cookie_secret, required),
channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
latest_features: Readable::read(reader)?,
pending_msg_events: Vec::new(),
+ monitor_update_blocked_actions: BTreeMap::new(),
+ is_connected: false,
};
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
}
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
let mut pending_claiming_payments = Some(HashMap::new());
+ let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, option),
(4, pending_claiming_payments, option),
(5, received_network_pubkey, option),
+ (6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
(9, claimable_htlc_purposes, vec_type),
(11, probing_cookie_secret, option),
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
+ if !channel_closures.is_empty() {
+ pending_events_read.append(&mut channel_closures);
+ }
+
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
pending_outbound_payments = Some(outbounds);
- } else {
+ }
+ let pending_outbounds = OutboundPayments {
+ pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+ retry_lock: Mutex::new(())
+ };
+
+ {
// If we're tracking pending payments, ensure we haven't lost any by looking at the
// ChannelMonitor data for any channels for which we do not have authorative state
// (i.e. those for which we just force-closed above or we otherwise don't have a
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
- for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
+ for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
if path.is_empty() {
log_error!(args.logger, "Got an empty path for a pending payment");
return Err(DecodeError::InvalidValue);
}
+
let path_amt = path.last().unwrap().fee_msat;
let mut session_priv_bytes = [0; 32];
session_priv_bytes[..].copy_from_slice(&session_priv[..]);
- match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+ match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
hash_map::Entry::Occupied(mut entry) => {
let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
}
}
}
- for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
- if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
- let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
- info.prev_funding_outpoint == prev_hop_data.outpoint &&
- info.prev_htlc_id == prev_hop_data.htlc_id
- };
- // The ChannelMonitor is now responsible for this HTLC's
- // failure/success and will let us know what its outcome is. If we
- // still have an entry for this HTLC in `forward_htlcs` or
- // `pending_intercepted_htlcs`, we were apparently not persisted after
- // the monitor was when forwarding the payment.
- forward_htlcs.retain(|_, forwards| {
- forwards.retain(|forward| {
- if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- false
+ for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+ match htlc_source {
+ HTLCSource::PreviousHopData(prev_hop_data) => {
+ let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+ info.prev_funding_outpoint == prev_hop_data.outpoint &&
+ info.prev_htlc_id == prev_hop_data.htlc_id
+ };
+ // The ChannelMonitor is now responsible for this HTLC's
+ // failure/success and will let us know what its outcome is. If we
+ // still have an entry for this HTLC in `forward_htlcs` or
+ // `pending_intercepted_htlcs`, we were apparently not persisted after
+ // the monitor was when forwarding the payment.
+ forward_htlcs.retain(|_, forwards| {
+ forwards.retain(|forward| {
+ if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ false
+ } else { true }
} else { true }
+ });
+ !forwards.is_empty()
+ });
+ pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+ pending_events_read.retain(|event| {
+ if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+ intercepted_id != ev_id
+ } else { true }
+ });
+ false
} else { true }
});
- !forwards.is_empty()
- });
- pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- pending_events_read.retain(|event| {
- if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
- intercepted_id != ev_id
- } else { true }
- });
- false
- } else { true }
- });
+ },
+ HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
+ if let Some(preimage) = preimage_opt {
+ let pending_events = Mutex::new(pending_events_read);
+ // Note that we set `from_onchain` to "false" here,
+ // deliberately keeping the pending payment around forever.
+ // Given it should only occur when we have a channel we're
+ // force-closing for being stale that's okay.
+ // The alternative would be to wipe the state when claiming,
+ // generating a `PaymentPathSuccessful` event but regenerating
+ // it and the `PaymentSent` on every restart until the
+ // `ChannelMonitor` is removed.
+ pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
+ pending_events_read = pending_events.into_inner().unwrap();
+ }
+ },
}
}
}
}
}
- if !forward_htlcs.is_empty() {
+ if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
// If we have pending HTLCs to forward, assume we either dropped a
// `PendingHTLCsForwardable` or the user received it but never processed it as they
// shut down before the timer hit. Either way, set the time_forwardable to a small
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
- if !channel_closures.is_empty() {
- pending_events_read.append(&mut channel_closures);
- }
-
let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
Ok(key) => key,
Err(()) => return Err(DecodeError::InvalidValue)
}
}
+ for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
+ if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+ peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
+ } else {
+ log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: bounded_fee_estimator,
inbound_payment_key: expanded_inbound_key,
pending_inbound_payments: Mutex::new(pending_inbound_payments),
- pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()) },
+ pending_outbound_payments: pending_outbounds,
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
mod tests {
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
- use bitcoin::secp256k1::ecdsa::Signature;
- use bitcoin::secp256k1::ffi::Signature as FFISignature;
- use bitcoin::blockdata::script::Script;
- use bitcoin::Txid;
use core::time::Duration;
use core::sync::atomic::Ordering;
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs;
- use crate::ln::msgs::{ChannelMessageHandler, OptionalField};
+ use crate::ln::msgs::ChannelMessageHandler;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
// to connect messages with new values
chan.0.contents.fee_base_msat *= 2;
chan.1.contents.fee_base_msat *= 2;
- let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
- let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+ let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
+ &nodes[1].node.get_our_node_id()).pop().unwrap();
+ let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
+ &nodes[0].node.get_our_node_id()).pop().unwrap();
// The first two nodes (which opened a channel) should now require fresh persistence
assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
- nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Send the second half of the original MPP payment.
- nodes[0].node.send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
// To start (1), send a regular payment but don't claim it.
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
final_value_msat: 100_000,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
}
}
+ #[test]
+ fn test_drop_disconnected_peers_when_removing_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+
+ {
+ // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
+ // disconnected and the channel between has been force closed.
+ let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ // Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
+ assert_eq!(nodes_0_per_peer_state.len(), 1);
+ assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
+ }
+
+ nodes[0].node.timer_tick_occurred();
+
+ {
+ // Assert that nodes[1] has now been removed.
+ assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
+ }
+ }
+
#[test]
fn bad_inbound_payment_hash() {
// Add coverage for checking that a user-provided payment hash matches the payment secret.
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
-
- assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
}
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
+ }
+ {
// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
// as it has the funding transaction.
let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
+ }
+ {
// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
let expected_message = format!("Not connected to node: {}", expected_public_key);
- check_api_misuse_error_message(expected_message, res_err)
+ check_api_error_message(expected_message, res_err)
}
fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
- check_api_misuse_error_message(expected_message, res_err)
+ check_api_error_message(expected_message, res_err)
}
- fn check_api_misuse_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
+ fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
match res_err {
Err(APIError::APIMisuseError { err }) => {
assert_eq!(err, expected_err_message);
},
+ Err(APIError::ChannelUnavailable { err }) => {
+ assert_eq!(err, expected_err_message);
+ },
Ok(_) => panic!("Unexpected Ok"),
Err(_) => panic!("Unexpected Error"),
}
#[test]
fn test_api_calls_with_unkown_counterparty_node() {
- // Tests that our API functions and message handlers that expects a `counterparty_node_id`
- // as input, behaves as expected if the `counterparty_node_id` is an unkown peer in the
+ // Tests that our API functions that expect a `counterparty_node_id` as input behave as
+ // expected if the `counterparty_node_id` is an unknown peer in the
// `ChannelManager::per_peer_state` map.
let chanmon_cfg = create_chanmon_cfgs(2);
let node_cfg = create_node_cfgs(2, &chanmon_cfg);
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
- // Boilerplate code to produce `open_channel` and `accept_channel` msgs more densly than
- // creating dummy ones.
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
- let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
- let accept_channel_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-
// Dummy values
let channel_id = [4; 32];
- let signature = Signature::from(unsafe { FFISignature::new() });
let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
let intercept_id = InterceptId([0; 32]);
- // Dummy msgs
- let funding_created_msg = msgs::FundingCreated {
- temporary_channel_id: open_channel_msg.temporary_channel_id,
- funding_txid: Txid::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(),
- funding_output_index: 0,
- signature: signature,
- };
-
- let funding_signed_msg = msgs::FundingSigned {
- channel_id: channel_id,
- signature: signature,
- };
-
- let channel_ready_msg = msgs::ChannelReady {
- channel_id: channel_id,
- next_per_commitment_point: unkown_public_key,
- short_channel_id_alias: None,
- };
-
- let announcement_signatures_msg = msgs::AnnouncementSignatures {
- channel_id: channel_id,
- short_channel_id: 0,
- node_signature: signature,
- bitcoin_signature: signature,
- };
-
- let channel_reestablish_msg = msgs::ChannelReestablish {
- channel_id: channel_id,
- next_local_commitment_number: 0,
- next_remote_commitment_number: 0,
- data_loss_protect: OptionalField::Absent,
- };
-
- let closing_signed_msg = msgs::ClosingSigned {
- channel_id: channel_id,
- fee_satoshis: 1000,
- signature: signature,
- fee_range: None,
- };
-
- let shutdown_msg = msgs::Shutdown {
- channel_id: channel_id,
- scriptpubkey: Script::new(),
- };
-
- let onion_routing_packet = msgs::OnionPacket {
- version: 255,
- public_key: Ok(unkown_public_key),
- hop_data: [1; 20*65],
- hmac: [2; 32]
- };
-
- let update_add_htlc_msg = msgs::UpdateAddHTLC {
- channel_id: channel_id,
- htlc_id: 0,
- amount_msat: 1000000,
- payment_hash: PaymentHash([1; 32]),
- cltv_expiry: 821716,
- onion_routing_packet
- };
-
- let commitment_signed_msg = msgs::CommitmentSigned {
- channel_id: channel_id,
- signature: signature,
- htlc_signatures: Vec::new(),
- };
+ // Test the API functions.
+ check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key);
- let update_fee_msg = msgs::UpdateFee {
- channel_id: channel_id,
- feerate_per_kw: 1000,
- };
+ check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);
- let malformed_update_msg = msgs::UpdateFailMalformedHTLC{
- channel_id: channel_id,
- htlc_id: 0,
- sha256_of_onion: [1; 32],
- failure_code: 0x8000,
- };
+ check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
- let fulfill_update_msg = msgs::UpdateFulfillHTLC{
- channel_id: channel_id,
- htlc_id: 0,
- payment_preimage: PaymentPreimage([1; 32]),
- };
+ check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
- let fail_update_msg = msgs::UpdateFailHTLC{
- channel_id: channel_id,
- htlc_id: 0,
- reason: msgs::OnionErrorPacket { data: Vec::new()},
- };
+ check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
- let revoke_and_ack_msg = msgs::RevokeAndACK {
- channel_id: channel_id,
- per_commitment_secret: [1; 32],
- next_per_commitment_point: unkown_public_key,
- };
+ check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
- // Test the API functions and message handlers.
- check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key);
+ check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
+ }
- nodes[1].node.handle_open_channel(&unkown_public_key, &open_channel_msg);
+ #[test]
+ fn test_connection_limiting() {
+ // Test that we limit un-channel'd peers and un-funded channels properly.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.handle_accept_channel(&unkown_public_key, &accept_channel_msg);
+ // Note that create_network connects the nodes together for us
- check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unkown_public_key, 42), unkown_public_key);
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_funding_created(&unkown_public_key, &funding_created_msg);
+ let mut funding_tx = None;
+ for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_funding_signed(&unkown_public_key, &funding_signed_msg);
+ if idx == 0 {
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+ funding_tx = Some(tx.clone());
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- nodes[0].node.handle_channel_ready(&unkown_public_key, &channel_ready_msg);
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ check_added_monitors!(nodes[1], 1);
+ let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
- nodes[1].node.handle_announcement_signatures(&unkown_public_key, &announcement_signatures_msg);
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+ check_added_monitors!(nodes[0], 1);
+ }
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
- check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
+ // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // Further, because all of our channels with nodes[0] are inbound, and none of them funded,
+ // it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
+ // limit.
+ let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
+ for _ in 1..super::MAX_NO_CHANNEL_PEERS {
+ let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ peer_pks.push(random_pk);
+ nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ }
+ let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+ // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
+ // them if we have too many un-channel'd peers.
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
+ for ev in chan_closed_events {
+ if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
+ }
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+		// but of course if the connection is outbound it's allowed...
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
+ // Even though we accept one more connection from new peers, we won't actually let them
+ // open channels.
+ assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
+ for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+ nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
+ open_channel_msg.temporary_channel_id);
+
+ // Of course, however, outbound channels are always allowed
+ nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
+
+		// If we fund the first channel, nodes[0] has a live on-chain channel with us, so it is
+		// now "protected" and can connect again.
+ mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ // Further, because the first channel was funded, we can open another channel with
+ // last_random_pk.
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+ }
- check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
+ #[test]
+ fn test_outbound_chans_unlimited() {
+		// Test that we never refuse an outbound channel even if a peer is unfunded-channel-limited
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
+ // Note that create_network connects the nodes together for us
- check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
+ for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
- nodes[0].node.handle_shutdown(&unkown_public_key, &shutdown_msg);
+ // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
+ // rejected.
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
- nodes[1].node.handle_closing_signed(&unkown_public_key, &closing_signed_msg);
+ // but we can still open an outbound channel.
+ nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_channel_reestablish(&unkown_public_key, &channel_reestablish_msg);
+ // but even with such an outbound channel, additional inbound channels will still fail.
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+ open_channel_msg.temporary_channel_id);
+ }
- nodes[1].node.handle_update_add_htlc(&unkown_public_key, &update_add_htlc_msg);
+ #[test]
+ fn test_0conf_limiting() {
+ // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
+ // flag set and (sometimes) accept channels as 0conf.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut settings = test_default_channel_config();
+ settings.manually_accept_inbound_channels = true;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[1].node.handle_commitment_signed(&unkown_public_key, &commitment_signed_msg);
+ // Note that create_network connects the nodes together for us
- nodes[1].node.handle_update_fail_malformed_htlc(&unkown_public_key, &malformed_update_msg);
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_update_fail_htlc(&unkown_public_key, &fail_update_msg);
+ // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
+ for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+ let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
- nodes[1].node.handle_update_fulfill_htlc(&unkown_public_key, &fulfill_update_msg);
+ nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
+ }
+ _ => panic!("Unexpected event"),
+ }
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
+ open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+ }
- nodes[1].node.handle_revoke_and_ack(&unkown_public_key, &revoke_and_ack_msg);
+ // If we try to accept a channel from another peer non-0conf it will fail.
+ let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+ &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+ nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+ features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
+ Err(APIError::APIMisuseError { err }) =>
+ assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
+ _ => panic!(),
+ }
+ }
+ _ => panic!("Unexpected event"),
+ }
+ assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
+ open_channel_msg.temporary_channel_id);
- nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
+ // ...however if we accept the same channel 0conf it should work just fine.
+ nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
+ }
+ _ => panic!("Unexpected event"),
+ }
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
#[cfg(anchors)]
_ => panic!("Unexpected event"),
}
- let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+ let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
// Note that this is unrealistic as each payment send will require at least two fsync
// calls per node.
let network = bitcoin::Network::Testnet;
- let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
- let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)));
+ let scorer = Mutex::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
let mut config: UserConfig = Default::default();
config.channel_handshake_config.minimum_depth = 1;
let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
network,
- best_block: BestBlock::from_genesis(network),
+ best_block: BestBlock::from_network(network),
});
let node_a_holder = NodeHolder { node: &node_a };
let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
network,
- best_block: BestBlock::from_genesis(network),
+ best_block: BestBlock::from_network(network),
});
let node_b_holder = NodeHolder { node: &node_b };
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap();
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap();
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![tx],
};
Listen::block_connected(&node_a, &block, 1);
_ => panic!("Unexpected event"),
}
- let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a);
+ let dummy_graph = NetworkGraph::new(network, &logger_a);
let mut payment_count: u64 = 0;
macro_rules! send_payment {
let usable_channels = $node_a.list_usable_channels();
let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
.with_features($node_b.invoice_features());
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let seed = [3u8; 32];
let keys_manager = KeysManager::new(&seed, 42, 42);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+ let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
$node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
_ => panic!("Failed to generate claim event"),
}
- let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+ let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, PaymentId, MIN_CLTV_EXPIRY_DELTA};
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
-use crate::routing::router::{PaymentParameters, Route, get_route};
+use crate::routing::router::{self, PaymentParameters, Route};
use crate::ln::features::InitFeatures;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
+use crate::util::events::ClosureReason;
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::scid_utils;
use crate::util::test_utils;
-use crate::util::test_utils::{panicking, TestChainMonitor};
-use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
+use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose};
use crate::util::errors::APIError;
use crate::util::config::UserConfig;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::prelude::*;
use core::cell::RefCell;
use alloc::rc::Rc;
-use crate::sync::{Arc, Mutex};
+use crate::sync::{Arc, Mutex, LockTestExt};
use core::mem;
use core::iter::repeat;
use bitcoin::{PackedLockTime, TxMerkleNode};
pub persister: test_utils::TestPersister,
pub logger: test_utils::TestLogger,
pub keys_manager: test_utils::TestKeysInterface,
+ pub scorer: Mutex<test_utils::TestScorer>,
}
pub struct NodeCfg<'a> {
}
}
+/// If we need an unsafe pointer to a `Node` (ie to reference it in a thread
+/// pre-std::thread::scope), this provides that with `Sync`. Note that accessing some of the fields
+/// in the `Node` are not safe to use (i.e. the ones behind an `Rc`), but that's left to the caller
+/// to figure out.
+pub struct NodePtr(pub *const Node<'static, 'static, 'static>);
+impl NodePtr {
+ pub fn from_node<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>) -> Self {
+ Self((node as *const Node<'a, 'b, 'c>).cast())
+ }
+}
+unsafe impl Send for NodePtr {}
+unsafe impl Sync for NodePtr {}
+
impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
fn drop(&mut self) {
if !panicking() {
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
}
+ let scorer = Mutex::new(test_utils::TestScorer::new());
let mut w = test_utils::TestVecWriter(Vec::new());
self.node.write(&mut w).unwrap();
<(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(w.0), ChannelManagerReadArgs {
node_signer: self.keys_manager,
signer_provider: self.keys_manager,
fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
- router: &test_utils::TestRouter::new(Arc::new(network_graph)),
+ router: &test_utils::TestRouter::new(Arc::new(network_graph), &scorer),
chain_monitor: self.chain_monitor,
tx_broadcaster: &broadcaster,
logger: &self.logger,
panic!();
}
}
- assert_eq!(*chain_source.watched_txn.lock().unwrap(), *self.chain_source.watched_txn.lock().unwrap());
- assert_eq!(*chain_source.watched_outputs.lock().unwrap(), *self.chain_source.watched_outputs.lock().unwrap());
+ assert_eq!(*chain_source.watched_txn.unsafe_well_ordered_double_lock_self(), *self.chain_source.watched_txn.unsafe_well_ordered_double_lock_self());
+ assert_eq!(*chain_source.watched_outputs.unsafe_well_ordered_double_lock_self(), *self.chain_source.watched_outputs.unsafe_well_ordered_double_lock_self());
}
}
}
(announcement, as_update, bs_update, channel_id, tx)
}
+/// Gets an RAA and CS which were sent in response to a commitment update
+///
+/// Should only be used directly when the `$node` is not actually a [`Node`].
+macro_rules! do_get_revoke_commit_msgs {
+ ($node: expr, $recipient: expr) => { {
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ (match events[0] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(node_id, $recipient);
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ }, match events[1] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(node_id, $recipient);
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ updates.commitment_signed.clone()
+ },
+ _ => panic!("Unexpected event"),
+ })
+ } }
+}
+
+/// Gets an RAA and CS which were sent in response to a commitment update
+pub fn get_revoke_commit_msgs(node: &Node, recipient: &PublicKey) -> (msgs::RevokeAndACK, msgs::CommitmentSigned) {
+ do_get_revoke_commit_msgs!(node, recipient)
+}
+
#[macro_export]
/// Gets an RAA and CS which were sent in response to a commitment update
+///
+/// Don't use this, use the identically-named function instead.
macro_rules! get_revoke_commit_msgs {
($node: expr, $node_id: expr) => {
- {
- use $crate::util::events::MessageSendEvent;
- let events = $node.node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- (match events[0] {
- MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- assert_eq!(*node_id, $node_id);
- (*msg).clone()
- },
- _ => panic!("Unexpected event"),
- }, match events[1] {
- MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
- assert_eq!(*node_id, $node_id);
- assert!(updates.update_add_htlcs.is_empty());
- assert!(updates.update_fulfill_htlcs.is_empty());
- assert!(updates.update_fail_htlcs.is_empty());
- assert!(updates.update_fail_malformed_htlcs.is_empty());
- assert!(updates.update_fee.is_none());
- updates.commitment_signed.clone()
- },
- _ => panic!("Unexpected event"),
- })
- }
+ $crate::ln::functional_test_utils::get_revoke_commit_msgs(&$node, &$node_id)
}
}
}
/// Get an error message from the pending events queue.
-#[macro_export]
-macro_rules! get_err_msg {
- ($node: expr, $node_id: expr) => {
- {
- let events = $node.node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- $crate::util::events::MessageSendEvent::HandleError {
- action: $crate::ln::msgs::ErrorAction::SendErrorMessage { ref msg }, ref node_id
- } => {
- assert_eq!(*node_id, $node_id);
- (*msg).clone()
- },
- _ => panic!("Unexpected event"),
- }
- }
+pub fn get_err_msg(node: &Node, recipient: &PublicKey) -> msgs::ErrorMessage {
+ let events = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::HandleError {
+ action: msgs::ErrorAction::SendErrorMessage { ref msg }, ref node_id
+ } => {
+ assert_eq!(node_id, recipient);
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
}
}
}
}
+/// Gets an UpdateHTLCs MessageSendEvent
+pub fn get_htlc_update_msgs(node: &Node, recipient: &PublicKey) -> msgs::CommitmentUpdate {
+ let events = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(node_id, recipient);
+ (*updates).clone()
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
#[macro_export]
/// Gets an UpdateHTLCs MessageSendEvent
+///
+/// Don't use this, use the identically-named function instead.
macro_rules! get_htlc_update_msgs {
($node: expr, $node_id: expr) => {
- {
- let events = $node.node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- $crate::util::events::MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
- assert_eq!(*node_id, $node_id);
- (*updates).clone()
- },
- _ => panic!("Unexpected event"),
- }
- }
+ $crate::ln::functional_test_utils::get_htlc_update_msgs(&$node, &$node_id)
}
}
/// Fetches the first `msg_event` to the passed `node_id` in the passed `msg_events` vec.
-/// Returns the `msg_event`, along with an updated `msg_events` vec with the message removed.
+/// Returns the `msg_event`.
///
/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
/// `msg_events` are stored under specific peers, this function does not fetch such `msg_events` as
/// such messages are intended to all peers.
-pub fn remove_first_msg_event_to_node(msg_node_id: &PublicKey, msg_events: &Vec<MessageSendEvent>) -> (MessageSendEvent, Vec<MessageSendEvent>) {
+pub fn remove_first_msg_event_to_node(msg_node_id: &PublicKey, msg_events: &mut Vec<MessageSendEvent>) -> MessageSendEvent {
let ev_index = msg_events.iter().position(|e| { match e {
MessageSendEvent::SendAcceptChannel { node_id, .. } => {
node_id == msg_node_id
MessageSendEvent::BroadcastChannelUpdate { .. } => {
false
},
+ MessageSendEvent::BroadcastNodeAnnouncement { .. } => {
+ false
+ },
MessageSendEvent::SendChannelUpdate { node_id, .. } => {
node_id == msg_node_id
},
},
}});
if ev_index.is_some() {
- let mut updated_msg_events = msg_events.to_vec();
- let ev = updated_msg_events.remove(ev_index.unwrap());
- (ev, updated_msg_events)
+ msg_events.remove(ev_index.unwrap())
} else {
panic!("Couldn't find any MessageSendEvent to the node!")
}
let mut per_peer_state_lock;
let mut peer_state_lock;
let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
- chan.get_feerate()
+ chan.get_feerate_sat_per_1000_weight()
}
}
}
}
/// Check whether N channel monitor(s) have been added.
+pub fn check_added_monitors(node: &Node, count: usize) {
+ let mut added_monitors = node.chain_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), count);
+ added_monitors.clear();
+}
+
+/// Check whether N channel monitor(s) have been added.
+///
+/// Don't use this, use the identically-named function instead.
#[macro_export]
macro_rules! check_added_monitors {
($node: expr, $count: expr) => {
- {
- let mut added_monitors = $node.chain_monitor.added_monitors.lock().unwrap();
- assert_eq!(added_monitors.len(), $count);
- added_monitors.clear();
- }
+ $crate::ln::functional_test_utils::check_added_monitors(&$node, $count);
}
}
assert_eq!(events_7.len(), 1);
let (announcement, bs_update) = match events_7[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
- (msg, update_msg)
+ (msg, update_msg.clone().unwrap())
},
_ => panic!("Unexpected event"),
};
let as_update = match events_8[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
assert!(*announcement == *msg);
+ let update_msg = update_msg.clone().unwrap();
assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
update_msg
*node_a.network_chan_count.borrow_mut() += 1;
expect_channel_ready_event(&node_b, &node_a.node.get_our_node_id());
- ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
+ ((*announcement).clone(), as_update, bs_update)
}
pub fn create_announced_chan_between_nodes<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
}
}
+pub fn do_check_spends<F: Fn(&bitcoin::blockdata::transaction::OutPoint) -> Option<TxOut>>(tx: &Transaction, get_output: F) {
+ for outp in tx.output.iter() {
+ assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Spending tx output didn't meet dust limit");
+ }
+ let mut total_value_in = 0;
+ for input in tx.input.iter() {
+ total_value_in += get_output(&input.previous_output).unwrap().value;
+ }
+ let mut total_value_out = 0;
+ for output in tx.output.iter() {
+ total_value_out += output.value;
+ }
+ let min_fee = (tx.weight() as u64 + 3) / 4; // One sat per vbyte (ie per weight/4, rounded up)
+	// Input amount - output amount = fee, so check that out + min_fee is no greater than input
+ assert!(total_value_out + min_fee <= total_value_in);
+ tx.verify(get_output).unwrap();
+}
+
#[macro_export]
macro_rules! check_spends {
($tx: expr, $($spends_txn: expr),*) => {
assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Input tx output didn't meet dust limit");
}
)*
- for outp in $tx.output.iter() {
- assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Spending tx output didn't meet dust limit");
- }
let get_output = |out_point: &bitcoin::blockdata::transaction::OutPoint| {
$(
if out_point.txid == $spends_txn.txid() {
)*
None
};
- let mut total_value_in = 0;
- for input in $tx.input.iter() {
- total_value_in += get_output(&input.previous_output).unwrap().value;
- }
- let mut total_value_out = 0;
- for output in $tx.output.iter() {
- total_value_out += output.value;
- }
- let min_fee = ($tx.weight() as u64 + 3) / 4; // One sat per vbyte (ie per weight/4, rounded up)
- // Input amount - output amount = fee, so check that out + min_fee is smaller than input
- assert!(total_value_out + min_fee <= total_value_in);
- $tx.verify(get_output).unwrap();
+ $crate::ln::functional_test_utils::do_check_spends(&$tx, get_output);
}
}
}
/// Check that a channel's closing channel update has been broadcasted, and optionally
/// check whether an error message event has occurred.
+pub fn check_closed_broadcast(node: &Node, with_error_msg: bool) -> Option<msgs::ErrorMessage> {
+ let msg_events = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), if with_error_msg { 2 } else { 1 });
+ match msg_events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ if with_error_msg {
+ match msg_events[1] {
+ MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+ // TODO: Check node_id
+ Some(msg.clone())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else { None }
+}
+
+/// Check that a channel's closing channel update has been broadcasted, and optionally
+/// check whether an error message event has occurred.
+///
+/// Don't use this, use the identically-named function instead.
#[macro_export]
macro_rules! check_closed_broadcast {
- ($node: expr, $with_error_msg: expr) => {{
- use $crate::util::events::MessageSendEvent;
- use $crate::ln::msgs::ErrorAction;
+ ($node: expr, $with_error_msg: expr) => {
+ $crate::ln::functional_test_utils::check_closed_broadcast(&$node, $with_error_msg)
+ }
+}
- let msg_events = $node.node.get_and_clear_pending_msg_events();
- assert_eq!(msg_events.len(), if $with_error_msg { 2 } else { 1 });
- match msg_events[0] {
- MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
- assert_eq!(msg.contents.flags & 2, 2);
+/// Check that a channel's closing channel events have been issued
+pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: ClosureReason, is_check_discard_funding: bool) {
+ let events = node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), events_count, "{:?}", events);
+ let mut issues_discard_funding = false;
+ for event in events {
+ match event {
+ Event::ChannelClosed { ref reason, .. } => {
+ assert_eq!(*reason, expected_reason);
},
+ Event::DiscardFunding { .. } => {
+ issues_discard_funding = true;
+ }
_ => panic!("Unexpected event"),
}
- if $with_error_msg {
- match msg_events[1] {
- MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
- // TODO: Check node_id
- Some(msg.clone())
- },
- _ => panic!("Unexpected event"),
- }
- } else { None }
- }}
+ }
+ assert_eq!(is_check_discard_funding, issues_discard_funding);
}
/// Check that a channel's closing channel events has been issued
+///
+/// Don't use this, use the identically-named function instead.
#[macro_export]
macro_rules! check_closed_event {
($node: expr, $events: expr, $reason: expr) => {
check_closed_event!($node, $events, $reason, false);
};
- ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr) => {{
- use $crate::util::events::Event;
-
- let events = $node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), $events, "{:?}", events);
- let expected_reason = $reason;
- let mut issues_discard_funding = false;
- for event in events {
- match event {
- Event::ChannelClosed { ref reason, .. } => {
- assert_eq!(*reason, expected_reason);
- },
- Event::DiscardFunding { .. } => {
- issues_discard_funding = true;
- }
- _ => panic!("Unexpected event"),
- }
- }
- assert_eq!($is_check_discard_funding, issues_discard_funding);
- }}
+ ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr) => {
+ $crate::ln::functional_test_utils::check_closed_event(&$node, $events, $reason, $is_check_discard_funding);
+ }
}
pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
}
#[macro_export]
+/// Don't use this, use the identically-named function instead.
macro_rules! expect_pending_htlcs_forwardable_conditions {
- ($node: expr, $expected_failures: expr) => {{
- let expected_failures = $expected_failures;
- let events = $node.node.get_and_clear_pending_events();
- match events[0] {
- $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
- _ => panic!("Unexpected event {:?}", events),
- };
-
- let count = expected_failures.len() + 1;
- assert_eq!(events.len(), count);
-
- if expected_failures.len() > 0 {
- expect_htlc_handling_failed_destinations!(events, expected_failures)
- }
- }}
+ ($node: expr, $expected_failures: expr) => {
+ $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions($node.node.get_and_clear_pending_events(), &$expected_failures);
+ }
}
#[macro_export]
}}
}
+/// Checks that an [`Event::PendingHTLCsForwardable`] is available in the given events and, if
+/// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCDestination`] is included in the
+/// `expected_failures` set.
+pub fn expect_pending_htlcs_forwardable_conditions(events: Vec<Event>, expected_failures: &[HTLCDestination]) {
+ match events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event {:?}", events),
+ };
+
+ let count = expected_failures.len() + 1;
+ assert_eq!(events.len(), count);
+
+ if expected_failures.len() > 0 {
+ expect_htlc_handling_failed_destinations!(events, expected_failures)
+ }
+}
+
#[macro_export]
/// Clears (and ignores) a PendingHTLCsForwardable event
+///
+/// Don't use this, call [`expect_pending_htlcs_forwardable_conditions()`] with an empty failure
+/// set instead.
macro_rules! expect_pending_htlcs_forwardable_ignore {
- ($node: expr) => {{
- expect_pending_htlcs_forwardable_conditions!($node, vec![]);
- }};
+ ($node: expr) => {
+ $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions($node.node.get_and_clear_pending_events(), &[]);
+ }
}
#[macro_export]
/// Clears (and ignores) PendingHTLCsForwardable and HTLCHandlingFailed events
+///
+/// Don't use this, call [`expect_pending_htlcs_forwardable_conditions()`] instead.
macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore {
- ($node: expr, $expected_failures: expr) => {{
- expect_pending_htlcs_forwardable_conditions!($node, $expected_failures);
- }};
+ ($node: expr, $expected_failures: expr) => {
+ $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions($node.node.get_and_clear_pending_events(), &$expected_failures);
+ }
}
#[macro_export]
/// Handles a PendingHTLCsForwardable event
macro_rules! expect_pending_htlcs_forwardable {
($node: expr) => {{
- expect_pending_htlcs_forwardable_ignore!($node);
+ $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions($node.node.get_and_clear_pending_events(), &[]);
$node.node.process_pending_htlc_forwards();
// Ensure process_pending_htlc_forwards is idempotent.
/// Handles a PendingHTLCsForwardable and HTLCHandlingFailed event
macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed {
($node: expr, $expected_failures: expr) => {{
- expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $expected_failures);
+ $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions($node.node.get_and_clear_pending_events(), &$expected_failures);
$node.node.process_pending_htlc_forwards();
// Ensure process_pending_htlc_forwards is idempotent.
};
($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => {
{
- check_added_monitors!($node_a, 0);
+ $crate::ln::functional_test_utils::check_added_monitors(&$node_a, 0);
assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
$node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed);
- check_added_monitors!($node_a, 1);
+ check_added_monitors(&$node_a, 1);
let (extra_msg_option, bs_revoke_and_ack) = $crate::ln::functional_test_utils::do_main_commitment_signed_dance(&$node_a, &$node_b, $fail_backwards);
assert!(extra_msg_option.is_none());
bs_revoke_and_ack
{
let (extra_msg_option, bs_revoke_and_ack) = $crate::ln::functional_test_utils::do_main_commitment_signed_dance(&$node_a, &$node_b, $fail_backwards);
$node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack);
- check_added_monitors!($node_a, 1);
+ $crate::ln::functional_test_utils::check_added_monitors(&$node_a, 1);
extra_msg_option
}
};
check_added_monitors!(node_b, 1);
node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &as_commitment_signed);
let (bs_revoke_and_ack, extra_msg_option) = {
- let events = node_b.node.get_and_clear_pending_msg_events();
+ let mut events = node_b.node.get_and_clear_pending_msg_events();
assert!(events.len() <= 2);
- let (node_a_event, events) = remove_first_msg_event_to_node(&node_a.node.get_our_node_id(), &events);
+ let node_a_event = remove_first_msg_event_to_node(&node_a.node.get_our_node_id(), &mut events);
(match node_a_event {
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
assert_eq!(*node_id, node_a.node.get_our_node_id());
}
/// Get a payment preimage and hash.
+pub fn get_payment_preimage_hash(recipient: &Node, min_value_msat: Option<u64>, min_final_cltv_expiry_delta: Option<u16>) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
+ let mut payment_count = recipient.network_payment_count.borrow_mut();
+ let payment_preimage = PaymentPreimage([*payment_count; 32]);
+ *payment_count += 1;
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+ let payment_secret = recipient.node.create_inbound_payment_for_hash(payment_hash, min_value_msat, 7200, min_final_cltv_expiry_delta).unwrap();
+ (payment_preimage, payment_hash, payment_secret)
+}
+
+/// Get a payment preimage and hash.
+///
+/// Don't use this, use the identically-named function instead.
#[macro_export]
macro_rules! get_payment_preimage_hash {
($dest_node: expr) => {
- {
- get_payment_preimage_hash!($dest_node, None)
- }
+ get_payment_preimage_hash!($dest_node, None)
};
($dest_node: expr, $min_value_msat: expr) => {
- {
- crate::get_payment_preimage_hash!($dest_node, $min_value_msat, None)
- }
+ crate::get_payment_preimage_hash!($dest_node, $min_value_msat, None)
};
($dest_node: expr, $min_value_msat: expr, $min_final_cltv_expiry_delta: expr) => {
- {
- use bitcoin::hashes::Hash as _;
- let mut payment_count = $dest_node.network_payment_count.borrow_mut();
- let payment_preimage = $crate::ln::PaymentPreimage([*payment_count; 32]);
- *payment_count += 1;
- let payment_hash = $crate::ln::PaymentHash(
- bitcoin::hashes::sha256::Hash::hash(&payment_preimage.0[..]).into_inner());
- let payment_secret = $dest_node.node.create_inbound_payment_for_hash(payment_hash, $min_value_msat, 7200, $min_final_cltv_expiry_delta).unwrap();
- (payment_preimage, payment_hash, payment_secret)
- }
+ $crate::ln::functional_test_utils::get_payment_preimage_hash(&$dest_node, $min_value_msat, $min_final_cltv_expiry_delta)
};
}
+/// Gets a route from the given sender to the node described in `payment_params`.
+pub fn get_route(send_node: &Node, payment_params: &PaymentParameters, recv_value: u64, final_cltv_expiry_delta: u32) -> Result<Route, msgs::LightningError> {
+ let scorer = TestScorer::new();
+ let keys_manager = TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
+ let random_seed_bytes = keys_manager.get_secure_random_bytes();
+ router::get_route(
+ &send_node.node.get_our_node_id(), payment_params, &send_node.network_graph.read_only(),
+ Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
+ recv_value, final_cltv_expiry_delta, send_node.logger, &scorer, &random_seed_bytes
+ )
+}
+
+/// Gets a route from the given sender to the node described in `payment_params`.
+///
+/// Don't use this, use the identically-named function instead.
#[macro_export]
macro_rules! get_route {
- ($send_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
- use $crate::chain::keysinterface::EntropySource;
- let scorer = $crate::util::test_utils::TestScorer::with_penalty(0);
- let keys_manager = $crate::util::test_utils::TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
- let random_seed_bytes = keys_manager.get_secure_random_bytes();
- $crate::routing::router::get_route(
- &$send_node.node.get_our_node_id(), &$payment_params, &$send_node.network_graph.read_only(),
- Some(&$send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
- $recv_value, $cltv, $send_node.logger, &scorer, &random_seed_bytes
- )
- }}
+ ($send_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {
+ $crate::ln::functional_test_utils::get_route(&$send_node, &$payment_params, $recv_value, $cltv)
+ }
}
#[cfg(test)]
$crate::get_route_and_payment_hash!($send_node, $recv_node, payment_params, $recv_value, TEST_FINAL_CLTV)
}};
($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
- let (payment_preimage, payment_hash, payment_secret) = $crate::get_payment_preimage_hash!($recv_node, Some($recv_value));
- let route = $crate::get_route!($send_node, $payment_params, $recv_value, $cltv);
+ let (payment_preimage, payment_hash, payment_secret) =
+ $crate::ln::functional_test_utils::get_payment_preimage_hash(&$recv_node, Some($recv_value), None);
+ let route = $crate::ln::functional_test_utils::get_route(&$send_node, &$payment_params, $recv_value, $cltv);
(route.unwrap(), payment_hash, payment_preimage, payment_secret)
}}
}
}
pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>(
- node: &'a Node<'b, 'c, 'd>, payment_failed_event: Event, expected_payment_hash: PaymentHash,
+ payment_failed_events: Vec<Event>, expected_payment_hash: PaymentHash,
expected_payment_failed_permanently: bool, conditions: PaymentFailedConditions<'e>
) {
- let expected_payment_id = match payment_failed_event {
- Event::PaymentPathFailed { payment_hash, payment_failed_permanently, path, retry, payment_id, network_update, short_channel_id,
+ if conditions.expected_mpp_parts_remain { assert_eq!(payment_failed_events.len(), 1); } else { assert_eq!(payment_failed_events.len(), 2); }
+ let expected_payment_id = match &payment_failed_events[0] {
+ Event::PaymentPathFailed { payment_hash, payment_failed_permanently, payment_id, failure,
#[cfg(test)]
error_code,
#[cfg(test)]
error_data, .. } => {
- assert_eq!(payment_hash, expected_payment_hash, "unexpected payment_hash");
- assert_eq!(payment_failed_permanently, expected_payment_failed_permanently, "unexpected payment_failed_permanently value");
- assert!(retry.is_some(), "expected retry.is_some()");
- assert_eq!(retry.as_ref().unwrap().final_value_msat, path.last().unwrap().fee_msat, "Retry amount should match last hop in path");
- assert_eq!(retry.as_ref().unwrap().payment_params.payee_pubkey, path.last().unwrap().pubkey, "Retry payee node_id should match last hop in path");
- if let Some(scid) = short_channel_id {
- assert!(retry.as_ref().unwrap().payment_params.previously_failed_channels.contains(&scid));
- }
-
+ assert_eq!(*payment_hash, expected_payment_hash, "unexpected payment_hash");
+ assert_eq!(*payment_failed_permanently, expected_payment_failed_permanently, "unexpected payment_failed_permanently value");
#[cfg(test)]
{
assert!(error_code.is_some(), "expected error_code.is_some() = true");
}
if let Some(chan_closed) = conditions.expected_blamed_chan_closed {
- match network_update {
- Some(NetworkUpdate::ChannelUpdateMessage { ref msg }) if !chan_closed => {
- if let Some(scid) = conditions.expected_blamed_scid {
- assert_eq!(msg.contents.short_channel_id, scid);
- }
- const CHAN_DISABLED_FLAG: u8 = 2;
- assert_eq!(msg.contents.flags & CHAN_DISABLED_FLAG, 0);
- },
- Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) if chan_closed => {
- if let Some(scid) = conditions.expected_blamed_scid {
- assert_eq!(short_channel_id, scid);
- }
- assert!(is_permanent);
- },
- Some(_) => panic!("Unexpected update type"),
- None => panic!("Expected update"),
- }
+ if let PathFailure::OnPath { network_update: Some(upd) } = failure {
+ match upd {
+ NetworkUpdate::ChannelUpdateMessage { ref msg } if !chan_closed => {
+ if let Some(scid) = conditions.expected_blamed_scid {
+ assert_eq!(msg.contents.short_channel_id, scid);
+ }
+ const CHAN_DISABLED_FLAG: u8 = 2;
+ assert_eq!(msg.contents.flags & CHAN_DISABLED_FLAG, 0);
+ },
+ NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } if chan_closed => {
+ if let Some(scid) = conditions.expected_blamed_scid {
+ assert_eq!(*short_channel_id, scid);
+ }
+ assert!(is_permanent);
+ },
+ _ => panic!("Unexpected update type"),
+ }
+ } else { panic!("Expected network update"); }
}
payment_id.unwrap()
_ => panic!("Unexpected event"),
};
if !conditions.expected_mpp_parts_remain {
- node.node.abandon_payment(expected_payment_id);
- let events = node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
+ match &payment_failed_events[1] {
Event::PaymentFailed { ref payment_hash, ref payment_id } => {
assert_eq!(*payment_hash, expected_payment_hash, "unexpected second payment_hash");
assert_eq!(*payment_id, expected_payment_id);
node: &'a Node<'b, 'c, 'd>, expected_payment_hash: PaymentHash, expected_payment_failed_permanently: bool,
conditions: PaymentFailedConditions<'e>
) {
- let mut events = node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- expect_payment_failed_conditions_event(node, events.pop().unwrap(), expected_payment_hash, expected_payment_failed_permanently, conditions);
+ let events = node.node.get_and_clear_pending_events();
+ expect_payment_failed_conditions_event(events, expected_payment_hash, expected_payment_failed_permanently, conditions);
}
pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_paths: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) -> PaymentId {
let mut events = origin_node.node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), expected_route.len());
for (path_idx, expected_path) in expected_route.iter().enumerate() {
- let (ev, updated_events) = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &events);
- events = updated_events;
+ let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
// Once we've gotten through all the HTLCs, the last one should result in a
// PaymentClaimable (but each previous one should not!), .
let expect_payment = path_idx == expected_route.len() - 1;
} else {
for expected_path in expected_paths.iter() {
// For MPP payments, we always want the message to the first node in the path.
- let (ev, updated_events) = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &events);
+ let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
per_path_msgs.push(msgs_from_ev!(&ev));
- events = updated_events;
}
}
pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV)
.with_features(expected_route.last().unwrap().node.invoice_features());
- let route = get_route!(origin_node, payment_params, recv_value, TEST_FINAL_CLTV).unwrap();
+ let route = get_route(origin_node, &payment_params, recv_value, TEST_FINAL_CLTV).unwrap();
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].len(), expected_route.len());
for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) {
let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV)
.with_features(expected_route.last().unwrap().node.invoice_features());
let network_graph = origin_node.network_graph.read_only();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let seed = [0u8; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let route = get_route(
+ let route = router::get_route(
&origin_node.node.get_our_node_id(), &payment_params, &network_graph,
None, recv_value, TEST_FINAL_CLTV, origin_node.logger, &scorer, &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
assert!(err.contains("Cannot send value that would put us over the max HTLC value in flight our peer will accept")));
}
-pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) {
- let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
- claim_payment(&origin, expected_route, our_payment_preimage);
+pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
+ let res = route_payment(&origin, expected_route, recv_value);
+ claim_payment(&origin, expected_route, res.0);
+ res
}
pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) {
}
pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths_slice: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) {
- let expected_payment_id = pass_failed_payment_back_no_abandon(origin_node, expected_paths_slice, skip_last, our_payment_hash);
- if !skip_last {
- origin_node.node.abandon_payment(expected_payment_id.unwrap());
- let events = origin_node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentFailed { ref payment_hash, ref payment_id } => {
- assert_eq!(*payment_hash, our_payment_hash, "unexpected second payment_hash");
- assert_eq!(*payment_id, expected_payment_id.unwrap());
- }
- _ => panic!("Unexpected second event"),
- }
- }
-}
-
-pub fn pass_failed_payment_back_no_abandon<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths_slice: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) -> Option<PaymentId> {
let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect();
check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());
per_path_msgs.sort_unstable_by(|(_, node_id_a), (_, node_id_b)| node_id_a.cmp(node_id_b));
expected_paths.sort_unstable_by(|path_a, path_b| path_a[path_a.len() - 2].node.get_our_node_id().cmp(&path_b[path_b.len() - 2].node.get_our_node_id()));
- let mut expected_payment_id = None;
-
for (i, (expected_route, (path_msgs, next_hop))) in expected_paths.iter().zip(per_path_msgs.drain(..)).enumerate() {
let mut next_msgs = Some(path_msgs);
let mut expected_next_node = next_hop;
assert!(origin_node.node.get_and_clear_pending_msg_events().is_empty());
commitment_signed_dance!(origin_node, prev_node, next_msgs.as_ref().unwrap().1, false);
let events = origin_node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- expected_payment_id = Some(match events[0] {
- Event::PaymentPathFailed { payment_hash, payment_failed_permanently, all_paths_failed, ref path, ref payment_id, .. } => {
+ if i == expected_paths.len() - 1 { assert_eq!(events.len(), 2); } else { assert_eq!(events.len(), 1); }
+
+ let expected_payment_id = match events[0] {
+ Event::PaymentPathFailed { payment_hash, payment_failed_permanently, ref path, ref payment_id, .. } => {
assert_eq!(payment_hash, our_payment_hash);
assert!(payment_failed_permanently);
- assert_eq!(all_paths_failed, i == expected_paths.len() - 1);
for (idx, hop) in expected_route.iter().enumerate() {
assert_eq!(hop.node.get_our_node_id(), path[idx].pubkey);
}
payment_id.unwrap()
},
_ => panic!("Unexpected event"),
- });
+ };
+ if i == expected_paths.len() - 1 {
+ match events[1] {
+ Event::PaymentFailed { ref payment_hash, ref payment_id } => {
+ assert_eq!(*payment_hash, our_payment_hash, "unexpected second payment_hash");
+ assert_eq!(*payment_id, expected_payment_id);
+ }
+ _ => panic!("Unexpected second event"),
+ }
+ }
}
}
assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_events().is_empty());
assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(expected_paths[0].last().unwrap(), 0);
-
- expected_payment_id
}
pub fn fail_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], our_payment_hash: PaymentHash) {
let persister = test_utils::TestPersister::new();
let seed = [i as u8; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
+ let scorer = Mutex::new(test_utils::TestScorer::new());
- chan_mon_cfgs.push(TestChanMonCfg { tx_broadcaster, fee_estimator, chain_source, logger, persister, keys_manager });
+ chan_mon_cfgs.push(TestChanMonCfg { tx_broadcaster, fee_estimator, chain_source, logger, persister, keys_manager, scorer });
}
chan_mon_cfgs
for i in 0..node_count {
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[i].chain_source), &chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator, &chanmon_cfgs[i].persister, &chanmon_cfgs[i].keys_manager);
- let network_graph = Arc::new(NetworkGraph::new(chanmon_cfgs[i].chain_source.genesis_hash, &chanmon_cfgs[i].logger));
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[i].logger));
let seed = [i as u8; 32];
nodes.push(NodeCfg {
chain_source: &chanmon_cfgs[i].chain_source,
logger: &chanmon_cfgs[i].logger,
tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster,
fee_estimator: &chanmon_cfgs[i].fee_estimator,
- router: test_utils::TestRouter::new(network_graph.clone()),
+ router: test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[i].scorer),
chain_monitor,
keys_manager: &chanmon_cfgs[i].keys_manager,
node_seed: seed,
let network = Network::Testnet;
let params = ChainParameters {
network,
- best_block: BestBlock::from_genesis(network),
+ best_block: BestBlock::from_network(network),
};
let node = ChannelManager::new(cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, &cfgs[i].router, cfgs[i].logger, cfgs[i].keys_manager,
cfgs[i].keys_manager, cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { test_default_channel_config() }, params);
for i in 0..node_count {
for j in (i+1)..node_count {
- nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()), remote_network_address: None }).unwrap();
- nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()), remote_network_address: None }).unwrap();
+ nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()), remote_network_address: None }, true).unwrap();
+ nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()), remote_network_address: None }, false).unwrap();
}
}
/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
/// for claims/fails they are separated out.
pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
- node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }).unwrap();
+ node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
- node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }).unwrap();
+ node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
if send_channel_ready.0 {
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
+use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::config::UserConfig;
},
_ => panic!()
}
- let events = nodes[1].node.get_and_clear_pending_msg_events();
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 2);
}
assert_eq!(events.len(), 3);
- let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
- let (nodes_0_event, events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events);
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+ let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
match nodes_2_event {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), if deliver_bs_raa { 2 + nodes.len() - 1 } else { 3 + nodes.len() });
+ assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
match events[0] {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
_ => panic!("Unexepected event"),
},
_ => panic!("Unexpected event"),
}
- if !deliver_bs_raa {
- match events[2] {
- Event::PendingHTLCsForwardable { .. } => { },
- _ => panic!("Unexpected event"),
- };
- nodes[1].node.abandon_payment(PaymentId(fourth_payment_hash.0));
- let payment_failed_events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(payment_failed_events.len(), 1);
- match payment_failed_events[0] {
- Event::PaymentFailed { ref payment_hash, .. } => {
- assert_eq!(*payment_hash, fourth_payment_hash);
- },
- _ => panic!("Unexpected event"),
- }
+ match events[2] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, fourth_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
}
+
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
let mut events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
- let events = if deliver_bs_raa {
- let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ if deliver_bs_raa {
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
match nodes_2_event {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
},
_ => panic!("Unexpected event"),
}
- events
- } else { events };
+ }
- let (nodes_2_event, events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
match nodes_2_event {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
assert_eq!(channel_id, chan_2.2);
_ => panic!("Unexpected event"),
}
- let (nodes_0_event, events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events);
+ let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
match nodes_0_event {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
assert!(update_add_htlcs.is_empty());
commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 3);
+ assert_eq!(events.len(), 6);
match events[0] {
- Event::PaymentPathFailed { ref payment_hash, ref network_update, .. } => {
+ Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
assert!(failed_htlcs.insert(payment_hash.0));
// If we delivered B's RAA we got an unknown preimage error, not something
// that we should update our routing table for.
if !deliver_bs_raa {
- assert!(network_update.is_some());
+ if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
}
},
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentPathFailed { ref payment_hash, ref network_update, .. } => {
- assert!(failed_htlcs.insert(payment_hash.0));
- assert!(network_update.is_some());
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, first_payment_hash);
},
_ => panic!("Unexpected event"),
}
match events[2] {
- Event::PaymentPathFailed { ref payment_hash, ref network_update, .. } => {
+ Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
assert!(failed_htlcs.insert(payment_hash.0));
- assert!(network_update.is_some());
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[3] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, second_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[4] {
+ Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
+ assert!(failed_htlcs.insert(payment_hash.0));
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[5] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, third_payment_hash);
},
_ => panic!("Unexpected event"),
}
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
}
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 2);
+ assert_eq!(events.len(), 3);
// Check that Alice fails backward the pending HTLC from the second payment.
match events[0] {
Event::PaymentPathFailed { payment_hash, .. } => {
_ => panic!("Unexpected event"),
}
match events[1] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, failed_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[2] {
Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
},
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
expect_payment_path_successful!(nodes[0]);
// Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
// disconnected before the funding transaction was broadcasted.
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
create_announced_chan_between_nodes(&nodes, 0, 1);
create_announced_chan_between_nodes(&nodes, 1, 2);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
{
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 3);
+ assert_eq!(events.len(), 4);
match events[0] {
Event::PaymentSent { payment_preimage, payment_hash, .. } => {
assert_eq!(payment_preimage, payment_preimage_3);
_ => panic!("Unexpected event"),
}
match events[1] {
+ Event::PaymentPathSuccessful { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ match events[2] {
Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
assert_eq!(payment_hash, payment_hash_5);
assert!(payment_failed_permanently);
},
_ => panic!("Unexpected event"),
}
- match events[2] {
- Event::PaymentPathSuccessful { .. } => {},
+ match events[3] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, payment_hash_5);
+ },
_ => panic!("Unexpected event"),
}
}
}
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
if messages_delivered < 3 {
if simulate_broken_lnd {
// lnd has a long-standing bug where they send a channel_ready prior to a
};
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
nodes[1].node.process_pending_htlc_forwards();
}
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
if messages_delivered < 2 {
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
if messages_delivered < 1 {
if messages_delivered == 1 || messages_delivered == 2 {
expect_payment_path_successful!(nodes[0]);
}
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ if messages_delivered <= 5 {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ }
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
if messages_delivered > 2 {
_ => panic!("Unexpected event"),
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
let payment_id = PaymentId([42; 32]);
let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &route).unwrap();
- nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
_ => panic!("Unexpected event"),
}
check_added_monitors!(nodes[1], 1);
- let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- let (nodes_2_event, msg_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &msg_events);
- let (nodes_0_event, msg_events) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &msg_events);
+ let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events);
+ let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
match nodes_2_event {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
}
let as_events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(as_events.len(), if announce_latest { 5 } else { 3 });
+ assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
let mut as_failds = HashSet::new();
let mut as_updates = 0;
for event in as_events.iter() {
- if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref network_update, .. } = event {
+ if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
assert!(as_failds.insert(*payment_hash));
if *payment_hash != payment_hash_2 {
assert_eq!(*payment_failed_permanently, deliver_last_raa);
} else {
assert!(!payment_failed_permanently);
}
- if network_update.is_some() {
+ if let PathFailure::OnPath { network_update: Some(_) } = failure {
as_updates += 1;
}
+ } else if let &Event::PaymentFailed { .. } = event {
} else { panic!("Unexpected event"); }
}
assert!(as_failds.contains(&payment_hash_1));
assert!(as_failds.contains(&payment_hash_6));
let bs_events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(bs_events.len(), if announce_latest { 4 } else { 3 });
+ assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
let mut bs_failds = HashSet::new();
let mut bs_updates = 0;
for event in bs_events.iter() {
- if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref network_update, .. } = event {
+ if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
assert!(bs_failds.insert(*payment_hash));
if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
assert_eq!(*payment_failed_permanently, deliver_last_raa);
} else {
assert!(!payment_failed_permanently);
}
- if network_update.is_some() {
+ if let PathFailure::OnPath { network_update: Some(_) } = failure {
bs_updates += 1;
}
+ } else if let &Event::PaymentFailed { .. } = event {
} else { panic!("Unexpected event"); }
}
assert!(bs_failds.contains(&payment_hash_1));
let seed = [42; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
- let network_graph = Arc::new(NetworkGraph::new(chanmon_cfgs[0].chain_source.genesis_hash, &chanmon_cfgs[0].logger));
- let router = test_utils::TestRouter::new(network_graph.clone());
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
+ let scorer = Mutex::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
// Check that the payment failed to be sent out.
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), 2);
match &events[0] {
- &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, ref network_update, ref all_paths_failed, ref short_channel_id, .. } => {
+ &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
assert_eq!(our_payment_hash.clone(), *payment_hash);
assert_eq!(*payment_failed_permanently, false);
- assert_eq!(*all_paths_failed, true);
- assert_eq!(*network_update, None);
assert_eq!(*short_channel_id, Some(route.paths[0][0].short_channel_id));
},
_ => panic!("Unexpected event"),
}
+ match &events[1] {
+ &Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(our_payment_hash.clone(), *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
}
// Test that if multiple HTLCs are released from the holding cell and one is
// Check that the second payment failed to be sent out.
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), 2);
match &events[0] {
- &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, ref network_update, ref all_paths_failed, ref short_channel_id, .. } => {
+ &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
assert_eq!(payment_hash_2.clone(), *payment_hash);
assert_eq!(*payment_failed_permanently, false);
- assert_eq!(*all_paths_failed, true);
- assert_eq!(*network_update, None);
assert_eq!(*short_channel_id, Some(route_2.paths[0][0].short_channel_id));
},
_ => panic!("Unexpected event"),
}
+ match &events[1] {
+ &Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(payment_hash_2.clone(), *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
// Complete the first payment and the RAA from the fee update.
let (payment_event, send_raa_event) = {
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
//Disconnect and Reconnect
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
}
let events_5 = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events_5.len(), 1);
+ assert_eq!(events_5.len(), 2);
// Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
// the node originating the error to its next hop.
match events_5[0] {
- Event::PaymentPathFailed { network_update:
- Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }), error_code, ..
+ Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
} => {
assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
assert!(is_permanent);
},
_ => panic!("Unexpected event"),
}
+ match events_5[1] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, our_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
// TODO: Test actual removal of channel from NetworkGraph when it's implemented.
}
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let events = nodes[0].node.get_and_clear_pending_events();
// Only 2 PaymentPathFailed events should show up, over-dust HTLC has to be failed by timeout tx
- assert_eq!(events.len(), 2);
+ assert_eq!(events.len(), 4);
let mut first_failed = false;
for event in events {
match event {
} else {
assert_eq!(payment_hash, payment_hash_2);
}
- }
+ },
+ Event::PaymentFailed { .. } => {}
_ => panic!("Unexpected event"),
}
}
// Create some initial channels
create_announced_chan_between_nodes(&nodes, 0, 1);
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_features(nodes[1].node.invoice_features());
let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
create_announced_chan_between_nodes(&nodes, 0, 1);
// Disconnect peers
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[0].node.timer_tick_occurred(); // Enabled -> DisabledStaged
nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
}
}
// Reconnect peers
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 3);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 3);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
// Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_features(nodes[1].node.invoice_features());
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
3_000_000, 50, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
let persister = test_utils::TestPersister::new();
let watchtower = {
- let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
- let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.write(&mut w).unwrap();
- let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
- assert!(new_monitor == *monitor);
+ let new_monitor = {
+ let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+ let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+ &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+ assert!(new_monitor == *monitor);
+ new_monitor
+ };
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
- if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
let persister = test_utils::TestPersister::new();
let watchtower_alice = {
- let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
- let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.write(&mut w).unwrap();
- let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
- assert!(new_monitor == *monitor);
+ let new_monitor = {
+ let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+ let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+ &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+ assert!(new_monitor == *monitor);
+ new_monitor
+ };
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
let persister = test_utils::TestPersister::new();
let watchtower_bob = {
- let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
- let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.write(&mut w).unwrap();
- let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
- assert!(new_monitor == *monitor);
+ let new_monitor = {
+ let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+ let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+ &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+ assert!(new_monitor == *monitor);
+ new_monitor
+ };
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
- if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Watchtower Alice should already have seen the block and reject the update
assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
};
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
- // At this point we'll try to add a duplicate channel monitor, which will be rejected, but
- // still needs to be cleared here.
- check_added_monitors!(nodes[1], 1);
+ // At this point we'll check whether the channel_id is already present and immediately fail the channel
+ // without trying to persist the `ChannelMonitor`.
+ check_added_monitors!(nodes[1], 0);
// ...still, nodes[1] will reject the duplicate channel.
{
_ => panic!("Unexpected event"),
}
// Note that at this point users of a standard PeerHandler will end up calling
- // peer_disconnected with no_connection_possible set to false, duplicating the
- // close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
- // users with their own peer handling logic. We duplicate the call here, however.
+ // peer_disconnected.
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
}
create_announced_chan_between_nodes(&nodes, 0, 1);
let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
let failure_events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(failure_events.len(), 2);
+ assert_eq!(failure_events.len(), 4);
if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
- if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+ if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
+ if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
+ if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
} else {
// Let the second HTLC fail and claim the first
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
- expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+ expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
}
if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
});
- let payment_params_opt = Some(payment_params);
let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
dup_route.paths.push(route.paths[1].clone());
nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &dup_route).unwrap()
};
- {
- nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
- check_added_monitors!(nodes[0], 1);
+ nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ {
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
}
assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
- {
- nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
- check_added_monitors!(nodes[0], 1);
+ nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ {
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let payment_event = SendEvent::from_event(events.pop().unwrap());
expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
- nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
- claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
+ do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 3);
+ match events[0] {
+ Event::PaymentSent { payment_hash, .. } => { // The payment was abandoned earlier, so the fee paid will be None
+ assert_eq!(payment_hash, our_payment_hash);
+ },
+ _ => panic!("Unexpected event")
+ }
+ match events[1] {
+ Event::PaymentPathSuccessful { payment_hash, .. } => {
+ assert_eq!(payment_hash.unwrap(), our_payment_hash);
+ },
+ _ => panic!("Unexpected event")
+ }
+ match events[2] {
+ Event::PaymentPathSuccessful { payment_hash, .. } => {
+ assert_eq!(payment_hash.unwrap(), our_payment_hash);
+ },
+ _ => panic!("Unexpected event")
+ }
}
#[test]
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10000,
- final_cltv_expiry_delta: 40,
};
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10000,
- final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
- let (node_1_msgs, _events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
+ let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
// At this point nodes[3] has received one half of the payment, and the user goes to handle
test_spendable_output(&nodes[1], &as_revoked_txn[0]);
let mut payment_failed_events = nodes[1].node.get_and_clear_pending_events();
- expect_payment_failed_conditions_event(&nodes[1], payment_failed_events.pop().unwrap(),
- dust_payment_hash, false, PaymentFailedConditions::new());
- expect_payment_failed_conditions_event(&nodes[1], payment_failed_events.pop().unwrap(),
+ expect_payment_failed_conditions_event(payment_failed_events[..2].to_vec(),
missing_htlc_payment_hash, false, PaymentFailedConditions::new());
- assert!(payment_failed_events.is_empty());
+ expect_payment_failed_conditions_event(payment_failed_events[2..].to_vec(),
+ dust_payment_hash, false, PaymentFailedConditions::new());
connect_blocks(&nodes[1], 1);
test_spendable_output(&nodes[1], &claim_txn[if confirm_htlc_spend_first { 2 } else { 3 }]);
fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures);
// Connection loss/reestablish:
- /// Indicates a connection to the peer failed/an existing connection was lost. If no connection
- /// is believed to be possible in the future (eg they're sending us messages we don't
- /// understand or indicate they require unknown feature bits), `no_connection_possible` is set
- /// and any outstanding channels should be failed.
- ///
- /// Note that in some rare cases this may be called without a corresponding
- /// [`Self::peer_connected`].
- fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+ /// Indicates a connection to the peer failed/an existing connection was lost.
+ fn peer_disconnected(&self, their_node_id: &PublicKey);
/// Handle a peer reconnecting, possibly generating `channel_reestablish` message(s).
///
/// May return an `Err(())` if the features the peer supports are not sufficient to communicate
/// with us. Implementors should be somewhat conservative about doing so, however, as other
/// message handlers may still wish to communicate with this peer.
- fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init) -> Result<(), ()>;
+ fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>;
/// Handle an incoming `channel_reestablish` message from the given peer.
fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish);
/// May return an `Err(())` if the features the peer supports are not sufficient to communicate
/// with us. Implementors should be somewhat conservative about doing so, however, as other
/// message handlers may still wish to communicate with this peer.
- fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
+ fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>;
/// Handles the reply of a query we initiated to learn about channels
/// for a given range of blocks. We can expect to receive one or more
/// replies to a single query.
/// list of `short_channel_id`s.
fn handle_query_short_channel_ids(&self, their_node_id: &PublicKey, msg: QueryShortChannelIds) -> Result<(), LightningError>;
+ // Handler queueing status:
+ /// Indicates that there are a large number of [`ChannelAnnouncement`] (or other) messages
+ /// pending some async action. While there is no guarantee of the rate of future messages, the
+ /// caller should seek to reduce the rate of new gossip messages handled, especially
+ /// [`ChannelAnnouncement`]s.
+ fn processing_queue_high(&self) -> bool;
+
// Handler information:
/// Gets the node feature flags which this handler itself supports. All available handlers are
/// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
/// May return an `Err(())` if the features the peer supports are not sufficient to communicate
/// with us. Implementors should be somewhat conservative about doing so, however, as other
/// message handlers may still wish to communicate with this peer.
- fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
+ fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>;
/// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
/// drop and refuse to forward onion messages to this peer.
- ///
- /// Note that in some rare cases this may be called without a corresponding
- /// [`Self::peer_connected`].
- fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+ fn peer_disconnected(&self, their_node_id: &PublicKey);
// Handler information:
/// Gets the node feature flags which this handler itself supports. All available handlers are
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate};
use crate::ln::wire::Encode;
-use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
+use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
use crate::util::ser::{Writeable, Writer};
use crate::util::test_utils;
use crate::util::config::{UserConfig, ChannelConfig};
commitment_signed_dance!(nodes[0], nodes[1], update_1_0.commitment_signed, false, true);
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- if let &Event::PaymentPathFailed { ref payment_failed_permanently, ref network_update, ref all_paths_failed, ref short_channel_id, ref error_code, .. } = &events[0] {
+ assert_eq!(events.len(), 2);
+ if let &Event::PaymentPathFailed { ref payment_failed_permanently, ref short_channel_id, ref error_code, failure: PathFailure::OnPath { ref network_update }, .. } = &events[0] {
assert_eq!(*payment_failed_permanently, !expected_retryable);
- assert_eq!(*all_paths_failed, true);
assert_eq!(*error_code, expected_error_code);
if expected_channel_update.is_some() {
match network_update {
} else {
panic!("Unexpected event");
}
- nodes[0].node.abandon_payment(payment_id);
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
+ match events[1] {
Event::PaymentFailed { payment_hash: ev_payment_hash, payment_id: ev_payment_id } => {
assert_eq!(*payment_hash, ev_payment_hash);
assert_eq!(payment_id, ev_payment_id);
let short_channel_id = channels[1].0.contents.short_channel_id;
run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {
// disconnect event to the channel between nodes[1] ~ nodes[2]
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
}, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_claimable!(nodes[1], payment_hash, payment_secret, payment_amount);
- nodes[1].node.fail_htlc_backwards_with_reason(&payment_hash, &failure_code);
+ nodes[1].node.fail_htlc_backwards_with_reason(&payment_hash, failure_code);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash }]);
check_added_monitors!(nodes[1], 1);
htlc_maximum_msat: None,
}
])]);
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let network_graph = $nodes[0].network_graph.read_only();
(get_route(
&$nodes[0].node.get_our_node_id(), &payment_params, &network_graph,
});
cur_value_msat += hop.fee_msat;
if cur_value_msat >= 21000000 * 100000000 * 1000 {
- return Err(APIError::InvalidRoute{err: "Channel fees overflowed?"});
+ return Err(APIError::InvalidRoute{err: "Channel fees overflowed?".to_owned()});
}
cur_cltv += hop.cltv_expiry_delta as u32;
if cur_cltv >= 500000000 {
- return Err(APIError::InvalidRoute{err: "Channel CLTV overflowed?"});
+ return Err(APIError::InvalidRoute{err: "Channel CLTV overflowed?".to_owned()});
}
last_short_channel_id = hop.short_channel_id;
}
use crate::chain::keysinterface::{EntropySource, NodeSigner, Recipient};
use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channelmanager::{ChannelDetails, HTLCSource, IDEMPOTENCY_TIMEOUT_TICKS, MIN_HTLC_RELAY_HOLDING_CELL_MILLIS, PaymentId};
-use crate::ln::channelmanager::MIN_FINAL_CLTV_EXPIRY_DELTA as LDK_DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA;
-use crate::ln::msgs::DecodeError;
+use crate::ln::channelmanager::{ChannelDetails, HTLCSource, IDEMPOTENCY_TIMEOUT_TICKS, PaymentId};
use crate::ln::onion_utils::HTLCFailReason;
use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router};
use crate::util::errors::APIError;
use crate::util::time::Time;
#[cfg(all(not(feature = "no-std"), test))]
use crate::util::time::tests::SinceEpoch;
+use crate::util::ser::ReadableArgs;
use core::cmp;
use core::fmt::{self, Display, Formatter};
use core::ops::Deref;
-use core::time::Duration;
use crate::prelude::*;
use crate::sync::Mutex;
payment_hash: Option<PaymentHash>,
timer_ticks_without_htlcs: u8,
},
- /// When a payer gives up trying to retry a payment, they inform us, letting us generate a
- /// `PaymentFailed` event when all HTLCs have irrevocably failed. This avoids a number of race
- /// conditions in MPP-aware payment retriers (1), where the possibility of multiple
- /// `PaymentPathFailed` events with `all_paths_failed` can be pending at once, confusing a
- /// downstream event handler as to when a payment has actually failed.
- ///
- /// (1) <https://github.com/lightningdevkit/rust-lightning/issues/1164>
+ /// When we've decided to give up retrying a payment, we mark it as abandoned so we can eventually
+ /// generate a `PaymentFailed` event when all HTLCs have irrevocably failed.
Abandoned {
session_privs: HashSet<[u8; 32]>,
payment_hash: PaymentHash,
}
fn is_auto_retryable_now(&self) -> bool {
match self {
- PendingOutboundPayment::Retryable { retry_strategy: Some(strategy), attempts, .. } => {
+ PendingOutboundPayment::Retryable {
+ retry_strategy: Some(strategy), attempts, payment_params: Some(_), ..
+ } => {
strategy.is_retryable_now(&attempts)
},
_ => false,
_ => false,
}
}
- fn payment_parameters(&mut self) -> Option<&mut PaymentParameters> {
- match self {
- PendingOutboundPayment::Retryable { payment_params: Some(ref mut params), .. } => {
- Some(params)
- },
- _ => None,
- }
- }
pub fn insert_previously_failed_scid(&mut self, scid: u64) {
if let PendingOutboundPayment::Retryable { payment_params: Some(params), .. } = self {
params.previously_failed_channels.push(scid);
let our_payment_hash;
core::mem::swap(&mut session_privs, match self {
PendingOutboundPayment::Legacy { .. } |
- PendingOutboundPayment::Fulfilled { .. } =>
+ PendingOutboundPayment::Fulfilled { .. } =>
return Err(()),
- PendingOutboundPayment::Retryable { session_privs, payment_hash, .. } |
- PendingOutboundPayment::Abandoned { session_privs, payment_hash, .. } => {
- our_payment_hash = *payment_hash;
- session_privs
- },
+ PendingOutboundPayment::Retryable { session_privs, payment_hash, .. } |
+ PendingOutboundPayment::Abandoned { session_privs, payment_hash, .. } => {
+ our_payment_hash = *payment_hash;
+ session_privs
+ },
});
*self = PendingOutboundPayment::Abandoned { session_privs, payment_hash: our_payment_hash };
Ok(())
///
/// Each attempt may be multiple HTLCs along multiple paths if the router decides to split up a
/// retry, and may retry multiple failed HTLCs at once if they failed around the same time and
- /// were retried along a route from a single call to [`Router::find_route`].
+ /// were retried along a route from a single call to [`Router::find_route_with_id`].
Attempts(usize),
#[cfg(not(feature = "no-std"))]
- /// Time elapsed before abandoning retries for a payment.
+ /// Time elapsed before abandoning retries for a payment. At least one attempt at payment is made;
+ /// see [`PaymentParameters::expiry_time`] to avoid any attempt at payment after a specific time.
+ ///
+ /// [`PaymentParameters::expiry_time`]: crate::routing::router::PaymentParameters::expiry_time
Timeout(core::time::Duration),
}
/// it means the result of the first attempt is not known yet.
pub(crate) count: usize,
/// This field is only used when retry is `Retry::Timeout` which is only build with feature std
- first_attempted_at: T
+ #[cfg(not(feature = "no-std"))]
+ first_attempted_at: T,
+ #[cfg(feature = "no-std")]
+ phantom: core::marker::PhantomData<T>,
+
}
#[cfg(not(any(feature = "no-std", test)))]
pub(crate) fn new() -> Self {
PaymentAttemptsUsingTime {
count: 0,
- first_attempted_at: T::now()
+ #[cfg(not(feature = "no-std"))]
+ first_attempted_at: T::now(),
+ #[cfg(feature = "no-std")]
+ phantom: core::marker::PhantomData,
}
}
}
}
}
-/// If a payment fails to send, it can be in one of several states. This enum is returned as the
-/// Err() type describing which state the payment is in, see the description of individual enum
-/// states for more.
+/// Indicates an immediate error on [`ChannelManager::send_payment_with_retry`]. Further errors
+/// may be surfaced later via [`Event::PaymentPathFailed`] and [`Event::PaymentFailed`].
+///
+/// [`ChannelManager::send_payment_with_retry`]: crate::ln::channelmanager::ChannelManager::send_payment_with_retry
+/// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+/// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+#[derive(Clone, Debug)]
+pub enum RetryableSendFailure {
+ /// The provided [`PaymentParameters::expiry_time`] indicated that the payment has expired. Note
+ /// that this error is *not* caused by [`Retry::Timeout`].
+ ///
+ /// [`PaymentParameters::expiry_time`]: crate::routing::router::PaymentParameters::expiry_time
+ PaymentExpired,
+ /// We were unable to find a route to the destination.
+ RouteNotFound,
+ /// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not
+ /// yet completed (i.e. generated an [`Event::PaymentSent`] or [`Event::PaymentFailed`]).
+ ///
+ /// [`PaymentId`]: crate::ln::channelmanager::PaymentId
+ /// [`Event::PaymentSent`]: crate::util::events::Event::PaymentSent
+ /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ DuplicatePayment,
+}
+
+/// If a payment fails to send with [`ChannelManager::send_payment`], it can be in one of several
+/// states. This enum is returned as the Err() type describing which state the payment is in, see
+/// the description of individual enum states for more.
+///
+/// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
#[derive(Clone, Debug)]
pub enum PaymentSendFailure {
/// A parameter which was passed to send_payment was invalid, preventing us from attempting to
///
/// You can freely resend the payment in full (with the parameter error fixed).
///
- /// Because the payment failed outright, no payment tracking is done, you do not need to call
- /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
- /// for this payment.
+ /// Because the payment failed outright, no payment tracking is done and no
+ /// [`Event::PaymentPathFailed`] or [`Event::PaymentFailed`] events will be generated.
///
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
+ /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
ParameterError(APIError),
/// A parameter in a single path which was passed to send_payment was invalid, preventing us
/// from attempting to send the payment at all.
///
/// You can freely resend the payment in full (with the parameter error fixed).
///
+ /// Because the payment failed outright, no payment tracking is done and no
+ /// [`Event::PaymentPathFailed`] or [`Event::PaymentFailed`] events will be generated.
+ ///
/// The results here are ordered the same as the paths in the route object which was passed to
/// send_payment.
///
- /// Because the payment failed outright, no payment tracking is done, you do not need to call
- /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
- /// for this payment.
- ///
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
+ /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
PathParameterError(Vec<Result<(), APIError>>),
/// All paths which were attempted failed to send, with no channel state change taking place.
/// You can freely resend the payment in full (though you probably want to do so over different
/// paths than the ones selected).
///
- /// Because the payment failed outright, no payment tracking is done, you do not need to call
- /// [`ChannelManager::abandon_payment`] and [`ChannelManager::retry_payment`] will *not* work
- /// for this payment.
+ /// Because the payment failed outright, no payment tracking is done and no
+ /// [`Event::PaymentPathFailed`] or [`Event::PaymentFailed`] events will be generated.
///
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
+ /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
AllFailedResendSafe(Vec<APIError>),
/// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not
- /// yet completed (i.e. generated an [`Event::PaymentSent`]) or been abandoned (via
- /// [`ChannelManager::abandon_payment`]).
+ /// yet completed (i.e. generated an [`Event::PaymentSent`] or [`Event::PaymentFailed`]).
///
/// [`PaymentId`]: crate::ln::channelmanager::PaymentId
/// [`Event::PaymentSent`]: crate::util::events::Event::PaymentSent
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
DuplicatePayment,
- /// Some paths which were attempted failed to send, though possibly not all. At least some
- /// paths have irrevocably committed to the HTLC and retrying the payment in full would result
- /// in over-/re-payment.
+ /// Some paths that were attempted failed to send, though some paths may have succeeded. At least
+ /// some paths have irrevocably committed to the HTLC.
///
- /// The results here are ordered the same as the paths in the route object which was passed to
- /// send_payment, and any `Err`s which are not [`APIError::MonitorUpdateInProgress`] can be
- /// safely retried via [`ChannelManager::retry_payment`].
+ /// The results here are ordered the same as the paths in the route object that was passed to
+ /// send_payment.
///
- /// Any entries which contain `Err(APIError::MonitorUpdateInprogress)` or `Ok(())` MUST NOT be
- /// retried as they will result in over-/re-payment. These HTLCs all either successfully sent
- /// (in the case of `Ok(())`) or will send once a [`MonitorEvent::Completed`] is provided for
- /// the next-hop channel with the latest update_id.
+ /// Any entries that contain `Err(APIError::MonitorUpdateInProgress)` will send once a
+ /// [`MonitorEvent::Completed`] is provided for the next-hop channel with the latest update_id.
///
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
/// [`MonitorEvent::Completed`]: crate::chain::channelmonitor::MonitorEvent::Completed
PartialFailure {
- /// The errors themselves, in the same order as the route hops.
+ /// The errors themselves, in the same order as the paths from the route.
results: Vec<Result<(), APIError>>,
/// If some paths failed without irrevocably committing to the new HTLC(s), this will
- /// contain a [`RouteParameters`] object which can be used to calculate a new route that
- /// will pay all remaining unpaid balance.
+ /// contain a [`RouteParameters`] object for the failing paths.
failed_paths_retry: Option<RouteParameters>,
/// The payment id for the payment, which is now at least partially pending.
payment_id: PaymentId,
pub(super) struct OutboundPayments {
pub(super) pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
+ pub(super) retry_lock: Mutex<()>,
}
impl OutboundPayments {
pub(super) fn new() -> Self {
Self {
- pending_outbound_payments: Mutex::new(HashMap::new())
+ pending_outbound_payments: Mutex::new(HashMap::new()),
+ retry_lock: Mutex::new(()),
}
}
- pub(super) fn send_payment<R: Deref, ES: Deref, NS: Deref, F, L: Deref>(
+ pub(super) fn send_payment<R: Deref, ES: Deref, NS: Deref, IH, SP, L: Deref>(
&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId,
retry_strategy: Retry, route_params: RouteParameters, router: &R,
- first_hops: Vec<ChannelDetails>, inflight_htlcs: InFlightHtlcs, entropy_source: &ES,
- node_signer: &NS, best_block_height: u32, logger: &L, send_payment_along_path: F,
- ) -> Result<(), PaymentSendFailure>
+ first_hops: Vec<ChannelDetails>, compute_inflight_htlcs: IH, entropy_source: &ES,
+ node_signer: &NS, best_block_height: u32, logger: &L,
+ pending_events: &Mutex<Vec<events::Event>>, send_payment_along_path: SP,
+ ) -> Result<(), RetryableSendFailure>
where
R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
+ IH: Fn() -> InFlightHtlcs,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
{
- self.pay_internal(payment_id, Some((payment_hash, payment_secret, None, retry_strategy)),
- route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer,
- best_block_height, logger, &send_payment_along_path)
- .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e })
+ self.send_payment_internal(payment_id, payment_hash, payment_secret, None, retry_strategy,
+ route_params, router, first_hops, &compute_inflight_htlcs, entropy_source, node_signer,
+ best_block_height, logger, pending_events, &send_payment_along_path)
}
pub(super) fn send_payment_with_route<ES: Deref, NS: Deref, F>(
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret, payment_id, None, route, None, None, entropy_source, best_block_height)?;
self.pay_route_internal(route, payment_hash, payment_secret, None, payment_id, None,
.map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e })
}
- pub(super) fn send_spontaneous_payment<R: Deref, ES: Deref, NS: Deref, F, L: Deref>(
+ pub(super) fn send_spontaneous_payment<R: Deref, ES: Deref, NS: Deref, IH, SP, L: Deref>(
&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId,
retry_strategy: Retry, route_params: RouteParameters, router: &R,
- first_hops: Vec<ChannelDetails>, inflight_htlcs: InFlightHtlcs, entropy_source: &ES,
- node_signer: &NS, best_block_height: u32, logger: &L, send_payment_along_path: F
- ) -> Result<PaymentHash, PaymentSendFailure>
+ first_hops: Vec<ChannelDetails>, inflight_htlcs: IH, entropy_source: &ES,
+ node_signer: &NS, best_block_height: u32, logger: &L,
+ pending_events: &Mutex<Vec<events::Event>>, send_payment_along_path: SP
+ ) -> Result<PaymentHash, RetryableSendFailure>
where
R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
+ IH: Fn() -> InFlightHtlcs,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
{
let preimage = payment_preimage
.unwrap_or_else(|| PaymentPreimage(entropy_source.get_secure_random_bytes()));
let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
- self.pay_internal(payment_id, Some((payment_hash, &None, Some(preimage), retry_strategy)),
+ self.send_payment_internal(payment_id, payment_hash, &None, Some(preimage), retry_strategy,
route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer,
- best_block_height, logger, &send_payment_along_path)
+ best_block_height, logger, pending_events, send_payment_along_path)
.map(|()| payment_hash)
- .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e })
}
pub(super) fn send_spontaneous_payment_with_route<ES: Deref, NS: Deref, F>(
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
let preimage = payment_preimage
.unwrap_or_else(|| PaymentPreimage(entropy_source.get_secure_random_bytes()));
pub(super) fn check_retry_payments<R: Deref, ES: Deref, NS: Deref, SP, IH, FH, L: Deref>(
&self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
- best_block_height: u32, logger: &L, send_payment_along_path: SP,
+ best_block_height: u32, pending_events: &Mutex<Vec<events::Event>>, logger: &L,
+ send_payment_along_path: SP,
)
where
R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
- SP: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
IH: Fn() -> InFlightHtlcs,
FH: Fn() -> Vec<ChannelDetails>,
L::Target: Logger,
{
+ let _single_thread = self.retry_lock.lock().unwrap();
loop {
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
let mut retry_id_route_params = None;
for (pmt_id, pmt) in outbounds.iter_mut() {
if pmt.is_auto_retryable_now() {
- if let PendingOutboundPayment::Retryable { pending_amt_msat, total_msat, payment_params: Some(params), .. } = pmt {
+ if let PendingOutboundPayment::Retryable { pending_amt_msat, total_msat, payment_params: Some(params), payment_hash, .. } = pmt {
if pending_amt_msat < total_msat {
- retry_id_route_params = Some((*pmt_id, RouteParameters {
+ retry_id_route_params = Some((*payment_hash, *pmt_id, RouteParameters {
final_value_msat: *total_msat - *pending_amt_msat,
- final_cltv_expiry_delta:
- if let Some(delta) = params.final_cltv_expiry_delta { delta }
- else {
- debug_assert!(false, "We always set the final_cltv_expiry_delta when a path fails");
- LDK_DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA.into()
- },
payment_params: params.clone(),
}));
break
}
- }
+ } else { debug_assert!(false); }
}
}
- if let Some((payment_id, route_params)) = retry_id_route_params {
- core::mem::drop(outbounds);
- if let Err(e) = self.pay_internal(payment_id, None, route_params, router, first_hops(), inflight_htlcs(), entropy_source, node_signer, best_block_height, logger, &send_payment_along_path) {
- log_info!(logger, "Errored retrying payment: {:?}", e);
- }
+ core::mem::drop(outbounds);
+ if let Some((payment_hash, payment_id, route_params)) = retry_id_route_params {
+ self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path)
} else { break }
}
+
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ outbounds.retain(|pmt_id, pmt| {
+ let mut retain = true;
+ if !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 {
+ if pmt.mark_abandoned().is_ok() {
+ pending_events.lock().unwrap().push(events::Event::PaymentFailed {
+ payment_id: *pmt_id,
+ payment_hash: pmt.payment_hash().expect("PendingOutboundPayments::Retryable always has a payment hash set"),
+ });
+ retain = false;
+ }
+ }
+ retain
+ });
}
- fn pay_internal<R: Deref, NS: Deref, ES: Deref, F, L: Deref>(
- &self, payment_id: PaymentId,
- initial_send_info: Option<(PaymentHash, &Option<PaymentSecret>, Option<PaymentPreimage>, Retry)>,
- route_params: RouteParameters, router: &R, first_hops: Vec<ChannelDetails>,
- inflight_htlcs: InFlightHtlcs, entropy_source: &ES, node_signer: &NS, best_block_height: u32,
- logger: &L, send_payment_along_path: &F,
- ) -> Result<(), PaymentSendFailure>
+ pub(super) fn needs_abandon(&self) -> bool {
+ let outbounds = self.pending_outbound_payments.lock().unwrap();
+ outbounds.iter().any(|(_, pmt)|
+ !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled())
+ }
+
+ /// Errors immediately on [`RetryableSendFailure`] error conditions. Otherwise, further errors may
+ /// be surfaced asynchronously via [`Event::PaymentPathFailed`] and [`Event::PaymentFailed`].
+ ///
+ /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ fn send_payment_internal<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
+ &self, payment_id: PaymentId, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>,
+ keysend_preimage: Option<PaymentPreimage>, retry_strategy: Retry, route_params: RouteParameters,
+ router: &R, first_hops: Vec<ChannelDetails>, inflight_htlcs: IH, entropy_source: &ES,
+ node_signer: &NS, best_block_height: u32, logger: &L,
+ pending_events: &Mutex<Vec<events::Event>>, send_payment_along_path: SP,
+ ) -> Result<(), RetryableSendFailure>
where
R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ IH: Fn() -> InFlightHtlcs,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
#[cfg(feature = "std")] {
if has_expired(&route_params) {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("Invoice expired for payment id {}", log_bytes!(payment_id.0)),
- }))
+ return Err(RetryableSendFailure::PaymentExpired)
}
}
- let route = router.find_route(
+ let route = router.find_route_with_id(
&node_signer.get_node_id(Recipient::Node).unwrap(), &route_params,
- Some(&first_hops.iter().collect::<Vec<_>>()), &inflight_htlcs
- ).map_err(|e| PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("Failed to find a route for payment {}: {:?}", log_bytes!(payment_id.0), e), // TODO: add APIError::RouteNotFound
- }))?;
-
- let res = if let Some((payment_hash, payment_secret, keysend_preimage, retry_strategy)) = initial_send_info {
- let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret, payment_id, keysend_preimage, &route, Some(retry_strategy), Some(route_params.payment_params.clone()), entropy_source, best_block_height)?;
- self.pay_route_internal(&route, payment_hash, payment_secret, None, payment_id, None, onion_session_privs, node_signer, best_block_height, send_payment_along_path)
- } else {
- self.retry_payment_with_route(&route, payment_id, entropy_source, node_signer, best_block_height, send_payment_along_path)
- };
- match res {
- Err(PaymentSendFailure::AllFailedResendSafe(_)) => {
- let retry_res = self.pay_internal(payment_id, None, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, send_payment_along_path);
- log_info!(logger, "Result retrying payment id {}: {:?}", log_bytes!(payment_id.0), retry_res);
- if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = &retry_res {
- if err.starts_with("Retries exhausted ") { return res; }
- }
- retry_res
- },
- Err(PaymentSendFailure::PartialFailure { failed_paths_retry: Some(retry), .. }) => {
- // Some paths were sent, even if we failed to send the full MPP value our recipient may
- // misbehave and claim the funds, at which point we have to consider the payment sent, so
- // return `Ok()` here, ignoring any retry errors.
- let retry_res = self.pay_internal(payment_id, None, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, send_payment_along_path);
- log_info!(logger, "Result retrying payment id {}: {:?}", log_bytes!(payment_id.0), retry_res);
- Ok(())
- },
- Err(PaymentSendFailure::PartialFailure { failed_paths_retry: None, .. }) => {
- // This may happen if we send a payment and some paths fail, but only due to a temporary
- // monitor failure or the like, implying they're really in-flight, but we haven't sent the
- // initial HTLC-Add messages yet.
- Ok(())
- },
- res => res,
+ Some(&first_hops.iter().collect::<Vec<_>>()), &inflight_htlcs(),
+ payment_hash, payment_id,
+ ).map_err(|_| RetryableSendFailure::RouteNotFound)?;
+
+ let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret,
+ payment_id, keysend_preimage, &route, Some(retry_strategy),
+ Some(route_params.payment_params.clone()), entropy_source, best_block_height)
+ .map_err(|_| RetryableSendFailure::DuplicatePayment)?;
+
+ let res = self.pay_route_internal(&route, payment_hash, payment_secret, None, payment_id, None,
+ onion_session_privs, node_signer, best_block_height, &send_payment_along_path);
+ log_info!(logger, "Result sending payment with id {}: {:?}", log_bytes!(payment_id.0), res);
+ if let Err(e) = res {
+ self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path);
}
+ Ok(())
}
- pub(super) fn retry_payment_with_route<ES: Deref, NS: Deref, F>(
- &self, route: &Route, payment_id: PaymentId, entropy_source: &ES, node_signer: &NS, best_block_height: u32,
- send_payment_along_path: F
- ) -> Result<(), PaymentSendFailure>
+ fn retry_payment_internal<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
+ &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters,
+ router: &R, first_hops: Vec<ChannelDetails>, inflight_htlcs: &IH, entropy_source: &ES,
+ node_signer: &NS, best_block_height: u32, logger: &L,
+ pending_events: &Mutex<Vec<events::Event>>, send_payment_along_path: &SP,
+ )
where
+ R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ L::Target: Logger,
+ IH: Fn() -> InFlightHtlcs,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
- const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
+ #[cfg(feature = "std")] {
+ if has_expired(&route_params) {
+ log_error!(logger, "Payment params expired on retry, abandoning payment {}", log_bytes!(payment_id.0));
+ self.abandon_payment(payment_id, pending_events);
+ return
+ }
+ }
+
+ let route = match router.find_route_with_id(
+ &node_signer.get_node_id(Recipient::Node).unwrap(), &route_params,
+ Some(&first_hops.iter().collect::<Vec<_>>()), &inflight_htlcs(),
+ payment_hash, payment_id,
+ ) {
+ Ok(route) => route,
+ Err(e) => {
+ log_error!(logger, "Failed to find a route on retry, abandoning payment {}: {:#?}", log_bytes!(payment_id.0), e);
+ self.abandon_payment(payment_id, pending_events);
+ return
+ }
+ };
for path in route.paths.iter() {
if path.len() == 0 {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "length-0 path in route".to_string()
- }))
+ log_error!(logger, "length-0 path in route");
+ self.abandon_payment(payment_id, pending_events);
+ return
}
}
+ const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
let mut onion_session_privs = Vec::with_capacity(route.paths.len());
for _ in 0..route.paths.len() {
onion_session_privs.push(entropy_source.get_secure_random_bytes());
}
- let (total_msat, payment_hash, payment_secret, keysend_preimage) = {
+ macro_rules! abandon_with_entry {
+ ($payment: expr) => {
+ if $payment.get_mut().mark_abandoned().is_ok() && $payment.get().remaining_parts() == 0 {
+ pending_events.lock().unwrap().push(events::Event::PaymentFailed {
+ payment_id,
+ payment_hash,
+ });
+ $payment.remove();
+ }
+ }
+ }
+ let (total_msat, payment_secret, keysend_preimage) = {
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
- match outbounds.get_mut(&payment_id) {
- Some(payment) => {
- let res = match payment {
+ match outbounds.entry(payment_id) {
+ hash_map::Entry::Occupied(mut payment) => {
+ let res = match payment.get() {
PendingOutboundPayment::Retryable {
- total_msat, payment_hash, keysend_preimage, payment_secret, pending_amt_msat, ..
+ total_msat, keysend_preimage, payment_secret, pending_amt_msat, ..
} => {
let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
- }))
+ log_error!(logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat);
+ abandon_with_entry!(payment);
+ return
}
- (*total_msat, *payment_hash, *payment_secret, *keysend_preimage)
+ (*total_msat, *payment_secret, *keysend_preimage)
},
PendingOutboundPayment::Legacy { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
- }))
+ log_error!(logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102");
+ return
},
PendingOutboundPayment::Fulfilled { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Payment already completed".to_owned()
- }));
+ log_error!(logger, "Payment already completed");
+ return
},
PendingOutboundPayment::Abandoned { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Payment already abandoned (with some HTLCs still pending)".to_owned()
- }));
+ log_error!(logger, "Payment already abandoned (with some HTLCs still pending)");
+ return
},
};
- if !payment.is_retryable_now() {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("Retries exhausted for payment id {}", log_bytes!(payment_id.0)),
- }))
+ if !payment.get().is_retryable_now() {
+ log_error!(logger, "Retries exhausted for payment id {}", log_bytes!(payment_id.0));
+ abandon_with_entry!(payment);
+ return
}
- payment.increment_attempts();
+ payment.get_mut().increment_attempts();
for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
- assert!(payment.insert(*session_priv_bytes, path));
+ assert!(payment.get_mut().insert(*session_priv_bytes, path));
}
res
},
- None =>
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
- })),
+ hash_map::Entry::Vacant(_) => {
+ log_error!(logger, "Payment with ID {} not found", log_bytes!(payment_id.0));
+ return
+ }
}
};
- self.pay_route_internal(route, payment_hash, &payment_secret, keysend_preimage, payment_id, Some(total_msat), onion_session_privs, node_signer, best_block_height, &send_payment_along_path)
+ let res = self.pay_route_internal(&route, payment_hash, &payment_secret, keysend_preimage,
+ payment_id, Some(total_msat), onion_session_privs, node_signer, best_block_height,
+ &send_payment_along_path);
+ log_info!(logger, "Result retrying payment id {}: {:?}", log_bytes!(payment_id.0), res);
+ if let Err(e) = res {
+ self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+ }
+ }
+
+ fn handle_pay_route_err<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
+ &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route,
+ mut route_params: RouteParameters, router: &R, first_hops: Vec<ChannelDetails>,
+ inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, logger: &L,
+ pending_events: &Mutex<Vec<events::Event>>, send_payment_along_path: &SP,
+ )
+ where
+ R::Target: Router,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ L::Target: Logger,
+ IH: Fn() -> InFlightHtlcs,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ {
+ match err {
+ PaymentSendFailure::AllFailedResendSafe(errs) => {
+ Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), pending_events);
+ self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+ },
+ PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => {
+ Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), pending_events);
+ // Some paths were sent, even if we failed to send the full MPP value our recipient may
+ // misbehave and claim the funds, at which point we have to consider the payment sent, so
+ // we retry the failed paths here without surfacing an error to the caller.
+ self.retry_payment_internal(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+ },
+ PaymentSendFailure::PartialFailure { failed_paths_retry: None, .. } => {
+ // This may happen if we send a payment and some paths fail, but only due to a temporary
+ // monitor failure or the like, implying they're really in-flight, but we haven't sent the
+ // initial HTLC-Add messages yet.
+ },
+ PaymentSendFailure::PathParameterError(results) => {
+ Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), pending_events);
+ self.abandon_payment(payment_id, pending_events);
+ },
+ PaymentSendFailure::ParameterError(e) => {
+ log_error!(logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e);
+ self.abandon_payment(payment_id, pending_events);
+ },
+ PaymentSendFailure::DuplicatePayment => debug_assert!(false), // unreachable
+ }
+ }
+
+ fn push_path_failed_evs_and_scids<I: ExactSizeIterator + Iterator<Item = Result<(), APIError>>>(
+ payment_id: PaymentId, payment_hash: PaymentHash, route_params: &mut RouteParameters,
+ paths: Vec<Vec<RouteHop>>, path_results: I, pending_events: &Mutex<Vec<events::Event>>
+ ) {
+ let mut events = pending_events.lock().unwrap();
+ debug_assert_eq!(paths.len(), path_results.len());
+ for (path, path_res) in paths.into_iter().zip(path_results) {
+ if let Err(e) = path_res {
+ if let APIError::MonitorUpdateInProgress = e { continue }
+ let mut failed_scid = None;
+ if let APIError::ChannelUnavailable { .. } = e {
+ let scid = path[0].short_channel_id;
+ failed_scid = Some(scid);
+ route_params.payment_params.previously_failed_channels.push(scid);
+ }
+ events.push(events::Event::PaymentPathFailed {
+ payment_id: Some(payment_id),
+ payment_hash,
+ payment_failed_permanently: false,
+ failure: events::PathFailure::InitialSend { err: e },
+ path,
+ short_channel_id: failed_scid,
+ #[cfg(test)]
+ error_code: None,
+ #[cfg(test)]
+ error_data: None,
+ });
+ }
+ }
}
pub(super) fn send_probe<ES: Deref, NS: Deref, F>(
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
let payment_id = PaymentId(entropy_source.get_secure_random_bytes());
) -> Result<(), PaymentSendFailure>
where
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
if route.paths.len() < 1 {
- return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over"}));
+ return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over".to_owned()}));
}
if payment_secret.is_none() && route.paths.len() > 1 {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_owned()}));
}
let mut total_value = 0;
let our_node_id = node_signer.get_node_id(Recipient::Node).unwrap(); // TODO no unwrap
let mut path_errs = Vec::with_capacity(route.paths.len());
'path_check: for path in route.paths.iter() {
if path.len() < 1 || path.len() > 20 {
- path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size"}));
+ path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size".to_owned()}));
continue 'path_check;
}
for (idx, hop) in path.iter().enumerate() {
if idx != path.len() - 1 && hop.pubkey == our_node_id {
- path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us"}));
+ path_errs.push(Err(APIError::InvalidRoute{err: "Path went through us but wasn't a simple rebalance loop to us".to_owned()}));
continue 'path_check;
}
}
let mut results = Vec::new();
debug_assert_eq!(route.paths.len(), onion_session_privs.len());
for (path, session_priv) in route.paths.iter().zip(onion_session_privs.into_iter()) {
- let mut path_res = send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv);
+ let mut path_res = send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv);
match path_res {
Ok(_) => {},
Err(APIError::MonitorUpdateInProgress) => {
Some(RouteParameters {
payment_params: payment_params.clone(),
final_value_msat: pending_amt_unsent,
- final_cltv_expiry_delta:
- if let Some(delta) = payment_params.final_cltv_expiry_delta { delta }
- else { max_unsent_cltv_delta },
})
} else { None }
} else { None },
) -> Result<(), PaymentSendFailure>
where
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
self.pay_route_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id,
recv_value_msat, onion_session_privs, node_signer, best_block_height,
.map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e })
}
- // If we failed to send any paths, we should remove the new PaymentId from the
- // `pending_outbound_payments` map, as the user isn't expected to `abandon_payment`.
+ // If we failed to send any paths, remove the new PaymentId from the `pending_outbound_payments`
+ // map as the payment is free to be resent.
fn remove_outbound_if_all_failed(&self, payment_id: PaymentId, err: &PaymentSendFailure) {
if let &PaymentSendFailure::AllFailedResendSafe(_) = err {
let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some();
});
}
+ // Returns a bool indicating whether a PendingHTLCsForwardable event should be generated.
pub(super) fn fail_htlc<L: Deref>(
&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
path: &Vec<RouteHop>, session_priv: &SecretKey, payment_id: &PaymentId,
- payment_params: &Option<PaymentParameters>, probing_cookie_secret: [u8; 32],
- secp_ctx: &Secp256k1<secp256k1::All>, pending_events: &Mutex<Vec<events::Event>>, logger: &L
- ) where L::Target: Logger {
+ probing_cookie_secret: [u8; 32], secp_ctx: &Secp256k1<secp256k1::All>,
+ pending_events: &Mutex<Vec<events::Event>>, logger: &L
+ ) -> bool where L::Target: Logger {
#[cfg(test)]
let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_error.decode_onion_failure(secp_ctx, logger, &source);
#[cfg(not(test))]
let (network_update, short_channel_id, payment_retryable, _, _) = onion_error.decode_onion_failure(secp_ctx, logger, &source);
+ let payment_is_probe = payment_is_probe(payment_hash, &payment_id, probing_cookie_secret);
let mut session_priv_bytes = [0; 32];
session_priv_bytes.copy_from_slice(&session_priv[..]);
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
- let mut all_paths_failed = false;
+
+ // If any payments already need retry, there's no need to generate a redundant
+ // `PendingHTLCsForwardable`.
+ let already_awaiting_retry = outbounds.iter().any(|(_, pmt)| {
+ let mut awaiting_retry = false;
+ if pmt.is_auto_retryable_now() {
+ if let PendingOutboundPayment::Retryable { pending_amt_msat, total_msat, .. } = pmt {
+ if pending_amt_msat < total_msat {
+ awaiting_retry = true;
+ }
+ }
+ }
+ awaiting_retry
+ });
+
let mut full_failure_ev = None;
- let mut pending_retry_ev = None;
- let mut retry = None;
+ let mut pending_retry_ev = false;
let attempts_remaining = if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) {
if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
log_trace!(logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
- return
+ return false
}
if payment.get().is_fulfilled() {
log_trace!(logger, "Received failure of HTLC with payment_hash {} after payment completion", log_bytes!(payment_hash.0));
- return
+ return false
}
- let is_retryable_now = payment.get().is_auto_retryable_now();
+ let mut is_retryable_now = payment.get().is_auto_retryable_now();
if let Some(scid) = short_channel_id {
+ // TODO: If we decided to blame ourselves (or one of our channels) in
+ // process_onion_failure we should close that channel as it implies our
+ // next-hop is needlessly blaming us!
payment.get_mut().insert_previously_failed_scid(scid);
}
- // We want to move towards only using the `PaymentParameters` in the outbound payments
- // map. However, for backwards-compatibility, we still need to support passing the
- // `PaymentParameters` data that was shoved in the HTLC (and given to us via
- // `payment_params`) back to the user.
- let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
- if let Some(params) = payment.get_mut().payment_parameters() {
- if params.final_cltv_expiry_delta.is_none() {
- // This should be rare, but a user could provide None for the payment data, and
- // we need it when we go to retry the payment, so fill it in.
- params.final_cltv_expiry_delta = Some(path_last_hop.cltv_expiry_delta);
- }
- retry = Some(RouteParameters {
- payment_params: params.clone(),
- final_value_msat: path_last_hop.fee_msat,
- final_cltv_expiry_delta: params.final_cltv_expiry_delta.unwrap(),
- });
- } else if let Some(params) = payment_params {
- retry = Some(RouteParameters {
- payment_params: params.clone(),
- final_value_msat: path_last_hop.fee_msat,
- final_cltv_expiry_delta:
- if let Some(delta) = params.final_cltv_expiry_delta { delta }
- else { path_last_hop.cltv_expiry_delta },
- });
+ if payment_is_probe || !is_retryable_now || !payment_retryable {
+ let _ = payment.get_mut().mark_abandoned(); // we'll only Err if it's a legacy payment
+ is_retryable_now = false;
}
-
if payment.get().remaining_parts() == 0 {
- all_paths_failed = true;
if payment.get().abandoned() {
- full_failure_ev = Some(events::Event::PaymentFailed {
- payment_id: *payment_id,
- payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
- });
+ if !payment_is_probe {
+ full_failure_ev = Some(events::Event::PaymentFailed {
+ payment_id: *payment_id,
+ payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
+ });
+ }
payment.remove();
}
}
is_retryable_now
} else {
log_trace!(logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
- return
+ return false
};
core::mem::drop(outbounds);
log_trace!(logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
let path_failure = {
- if payment_is_probe(payment_hash, &payment_id, probing_cookie_secret) {
+ if payment_is_probe {
if !payment_retryable {
events::Event::ProbeSuccessful {
payment_id: *payment_id,
}
}
} else {
- // TODO: If we decided to blame ourselves (or one of our channels) in
- // process_onion_failure we should close that channel as it implies our
- // next-hop is needlessly blaming us!
- if let Some(scid) = short_channel_id {
- retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
- }
- if payment_retryable && attempts_remaining && retry.is_some() {
+ // If we miss abandoning the payment above, we *must* generate an event here or else the
+ // payment will sit in our outbounds forever.
+ if attempts_remaining && !already_awaiting_retry {
debug_assert!(full_failure_ev.is_none());
- pending_retry_ev = Some(events::Event::PendingHTLCsForwardable {
- time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
- });
+ pending_retry_ev = true;
}
events::Event::PaymentPathFailed {
payment_id: Some(*payment_id),
payment_hash: payment_hash.clone(),
payment_failed_permanently: !payment_retryable,
- network_update,
- all_paths_failed,
+ failure: events::PathFailure::OnPath { network_update },
path: path.clone(),
short_channel_id,
- retry,
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
let mut pending_events = pending_events.lock().unwrap();
pending_events.push(path_failure);
if let Some(ev) = full_failure_ev { pending_events.push(ev); }
- if let Some(ev) = pending_retry_ev { pending_events.push(ev); }
+ pending_retry_ev
}
- pub(super) fn abandon_payment(&self, payment_id: PaymentId) -> Option<events::Event> {
- let mut failed_ev = None;
+ pub(super) fn abandon_payment(
+ &self, payment_id: PaymentId, pending_events: &Mutex<Vec<events::Event>>
+ ) {
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
if let Ok(()) = payment.get_mut().mark_abandoned() {
if payment.get().remaining_parts() == 0 {
- failed_ev = Some(events::Event::PaymentFailed {
+ pending_events.lock().unwrap().push(events::Event::PaymentFailed {
payment_id,
payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
});
}
}
}
- failed_ev
}
#[cfg(test)]
(0, session_privs, required),
(1, pending_fee_msat, option),
(2, payment_hash, required),
- (3, payment_params, option),
+ // Note that while we "default" payment_param's final CLTV expiry delta to 0 we should
+ // never see it - `payment_params` was added here after the field was added/required.
+ (3, payment_params, (option: ReadableArgs, 0)),
(4, payment_secret, option),
(5, keysend_preimage, option),
(6, total_msat, required),
#[cfg(test)]
mod tests {
- use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use crate::ln::PaymentHash;
- use crate::ln::channelmanager::{PaymentId, PaymentSendFailure};
+ use crate::ln::channelmanager::PaymentId;
+ use crate::ln::features::{ChannelFeatures, NodeFeatures};
use crate::ln::msgs::{ErrorAction, LightningError};
- use crate::ln::outbound_payment::{OutboundPayments, Retry};
+ use crate::ln::outbound_payment::{OutboundPayments, Retry, RetryableSendFailure};
use crate::routing::gossip::NetworkGraph;
- use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters};
- use crate::sync::Arc;
+ use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters};
+ use crate::sync::{Arc, Mutex};
use crate::util::errors::APIError;
+ use crate::util::events::{Event, PathFailure};
use crate::util::test_utils;
#[test]
fn do_fails_paying_after_expiration(on_retry: bool) {
let outbound_payments = OutboundPayments::new();
let logger = test_utils::TestLogger::new();
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = Arc::new(NetworkGraph::new(genesis_hash, &logger));
- let router = test_utils::TestRouter::new(network_graph);
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+ let scorer = Mutex::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let expired_route_params = RouteParameters {
payment_params,
final_value_msat: 0,
- final_cltv_expiry_delta: 0,
};
- let err = if on_retry {
- outbound_payments.pay_internal(
- PaymentId([0; 32]), None, expired_route_params, &&router, vec![], InFlightHtlcs::new(),
- &&keys_manager, &&keys_manager, 0, &&logger, &|_, _, _, _, _, _, _, _, _| Ok(())).unwrap_err()
+ let pending_events = Mutex::new(Vec::new());
+ if on_retry {
+ outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), None, PaymentId([0; 32]), None,
+ &Route { paths: vec![], payment_params: None }, Some(Retry::Attempts(1)),
+ Some(expired_route_params.payment_params.clone()), &&keys_manager, 0).unwrap();
+ outbound_payments.retry_payment_internal(
+ PaymentHash([0; 32]), PaymentId([0; 32]), expired_route_params, &&router, vec![],
+ &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
+ &pending_events, &|_, _, _, _, _, _, _, _| Ok(()));
+ let events = pending_events.lock().unwrap();
+ assert_eq!(events.len(), 1);
+ if let Event::PaymentFailed { .. } = events[0] { } else { panic!("Unexpected event"); }
} else {
- outbound_payments.send_payment(
+ let err = outbound_payments.send_payment(
PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), expired_route_params,
- &&router, vec![], InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
- |_, _, _, _, _, _, _, _, _| Ok(())).unwrap_err()
- };
- if let PaymentSendFailure::ParameterError(APIError::APIMisuseError { err }) = err {
- assert!(err.contains("Invoice expired"));
- } else { panic!("Unexpected error"); }
+ &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
+ &pending_events, |_, _, _, _, _, _, _, _| Ok(())).unwrap_err();
+ if let RetryableSendFailure::PaymentExpired = err { } else { panic!("Unexpected error"); }
+ }
}
#[test]
fn do_find_route_error(on_retry: bool) {
let outbound_payments = OutboundPayments::new();
let logger = test_utils::TestLogger::new();
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = Arc::new(NetworkGraph::new(genesis_hash, &logger));
- let router = test_utils::TestRouter::new(network_graph);
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+ let scorer = Mutex::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let route_params = RouteParameters {
payment_params,
final_value_msat: 0,
- final_cltv_expiry_delta: 0,
};
router.expect_find_route(route_params.clone(),
Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }));
- let err = if on_retry {
+ let pending_events = Mutex::new(Vec::new());
+ if on_retry {
outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), None, PaymentId([0; 32]), None,
&Route { paths: vec![], payment_params: None }, Some(Retry::Attempts(1)),
Some(route_params.payment_params.clone()), &&keys_manager, 0).unwrap();
- outbound_payments.pay_internal(
- PaymentId([0; 32]), None, route_params, &&router, vec![], InFlightHtlcs::new(),
- &&keys_manager, &&keys_manager, 0, &&logger, &|_, _, _, _, _, _, _, _, _| Ok(())).unwrap_err()
+ outbound_payments.retry_payment_internal(
+ PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![],
+ &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
+ &pending_events, &|_, _, _, _, _, _, _, _| Ok(()));
+ let events = pending_events.lock().unwrap();
+ assert_eq!(events.len(), 1);
+ if let Event::PaymentFailed { .. } = events[0] { } else { panic!("Unexpected event"); }
} else {
- outbound_payments.send_payment(
+ let err = outbound_payments.send_payment(
PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), route_params,
- &&router, vec![], InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
- |_, _, _, _, _, _, _, _, _| Ok(())).unwrap_err()
+ &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
+ &pending_events, |_, _, _, _, _, _, _, _| Ok(())).unwrap_err();
+ if let RetryableSendFailure::RouteNotFound = err {
+ } else { panic!("Unexpected error"); }
+ }
+ }
+
+ #[test]
+ fn initial_send_payment_path_failed_evs() {
+ let outbound_payments = OutboundPayments::new();
+ let logger = test_utils::TestLogger::new();
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+ let scorer = Mutex::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let secp_ctx = Secp256k1::new();
+ let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+ let sender_pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let receiver_pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap());
+ let payment_params = PaymentParameters::from_node_id(sender_pk, 0);
+ let route_params = RouteParameters {
+ payment_params: payment_params.clone(),
+ final_value_msat: 0,
+ };
+ let failed_scid = 42;
+ let route = Route {
+ paths: vec![vec![RouteHop {
+ pubkey: receiver_pk,
+ node_features: NodeFeatures::empty(),
+ short_channel_id: failed_scid,
+ channel_features: ChannelFeatures::empty(),
+ fee_msat: 0,
+ cltv_expiry_delta: 0,
+ }]],
+ payment_params: Some(payment_params),
};
- if let PaymentSendFailure::ParameterError(APIError::APIMisuseError { err }) = err {
- assert!(err.contains("Failed to find a route"));
- } else { panic!("Unexpected error"); }
+ router.expect_find_route(route_params.clone(), Ok(route.clone()));
+ let mut route_params_w_failed_scid = route_params.clone();
+ route_params_w_failed_scid.payment_params.previously_failed_channels.push(failed_scid);
+ router.expect_find_route(route_params_w_failed_scid, Ok(route.clone()));
+ router.expect_find_route(route_params.clone(), Ok(route.clone()));
+ router.expect_find_route(route_params.clone(), Ok(route.clone()));
+
+ // Ensure that a ChannelUnavailable error will result in blaming an scid in the
+ // PaymentPathFailed event.
+ let pending_events = Mutex::new(Vec::new());
+ outbound_payments.send_payment(
+ PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(),
+ &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
+ &pending_events,
+ |_, _, _, _, _, _, _, _| Err(APIError::ChannelUnavailable { err: "test".to_owned() }))
+ .unwrap();
+ let mut events = pending_events.lock().unwrap();
+ assert_eq!(events.len(), 2);
+ if let Event::PaymentPathFailed {
+ short_channel_id,
+ failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. }}, .. } = events[0]
+ {
+ assert_eq!(short_channel_id, Some(failed_scid));
+ } else { panic!("Unexpected event"); }
+ if let Event::PaymentFailed { .. } = events[1] { } else { panic!("Unexpected event"); }
+ events.clear();
+ core::mem::drop(events);
+
+ // Ensure that a MonitorUpdateInProgress "error" will not result in a PaymentPathFailed event.
+ outbound_payments.send_payment(
+ PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(),
+ &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
+ &pending_events, |_, _, _, _, _, _, _, _| Err(APIError::MonitorUpdateInProgress))
+ .unwrap();
+ {
+ let events = pending_events.lock().unwrap();
+ assert_eq!(events.len(), 0);
+ }
+
+ // Ensure that any other error will result in a PaymentPathFailed event but no blamed scid.
+ outbound_payments.send_payment(
+ PaymentHash([0; 32]), &None, PaymentId([1; 32]), Retry::Attempts(0), route_params.clone(),
+ &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
+ &pending_events,
+ |_, _, _, _, _, _, _, _| Err(APIError::APIMisuseError { err: "test".to_owned() }))
+ .unwrap();
+ let events = pending_events.lock().unwrap();
+ assert_eq!(events.len(), 2);
+ if let Event::PaymentPathFailed {
+ short_channel_id,
+ failure: PathFailure::InitialSend { err: APIError::APIMisuseError { .. }}, .. } = events[0]
+ {
+ assert_eq!(short_channel_id, None);
+ } else { panic!("Unexpected event"); }
+ if let Event::PaymentFailed { .. } = events[1] { } else { panic!("Unexpected event"); }
}
}
use crate::ln::msgs;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::outbound_payment::Retry;
-use crate::routing::gossip::RoutingFees;
+use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
+use crate::routing::scoring::ChannelUsage;
+use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
use crate::util::test_utils;
use crate::util::errors::APIError;
use crate::util::ser::Writeable;
#[cfg(feature = "std")]
use {
crate::util::time::tests::SinceEpoch,
- std::time::{SystemTime, Duration}
+ std::time::{SystemTime, Instant, Duration}
};
-#[test]
-fn retry_single_path_payment() {
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
- let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1);
- // Rebalance to find a route
- send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
-
- let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
-
- // Rebalance so that the first hop fails.
- send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);
-
- // Make sure the payment fails on the first hop.
- let payment_id = PaymentId(payment_hash.0);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
- check_added_monitors!(nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let mut payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
- check_added_monitors!(nodes[1], 0);
- commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
- let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- assert!(htlc_updates.update_add_htlcs.is_empty());
- assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
- assert!(htlc_updates.update_fulfill_htlcs.is_empty());
- assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
- check_added_monitors!(nodes[1], 1);
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
- expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
-
- // Rebalance the channel so the retry succeeds.
- send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
-
- // Mine two blocks (we expire retries after 3, so this will check that we don't expire early)
- connect_blocks(&nodes[0], 2);
-
- // Retry the payment and make sure it succeeds.
- nodes[0].node.retry_payment(&route, payment_id).unwrap();
- check_added_monitors!(nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 100_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
- claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
-}
-
#[test]
fn mpp_failure() {
let chanmon_cfgs = create_chanmon_cfgs(4);
// Rebalance
send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
- let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 1_000_000);
+ let amt_msat = 1_000_000;
+ let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], amt_msat);
let path = route.paths[0].clone();
route.paths.push(path);
route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
// Initiate the MPP payment.
let payment_id = PaymentId(payment_hash.0);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+ let mut route_params = RouteParameters {
+ payment_params: route.payment_params.clone().unwrap(),
+ final_value_msat: amt_msat,
+ };
+
+ nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+ nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
check_added_monitors!(nodes[0], 2); // one monitor per path
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
// Pass half of the payment along the success path.
- let (success_path_msgs, mut events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
+ let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);
// Add the HTLC along the first hop.
- let (fail_path_msgs_1, _events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
let (update_add, commitment_signed) = match fail_path_msgs_1 {
MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
assert_eq!(update_add_htlcs.len(), 1);
check_added_monitors!(nodes[2], 1);
nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
- expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => {},
+ _ => panic!("Unexpected event")
+ }
+ events.remove(1);
+ expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
// Rebalance the channel so the second half of the payment can succeed.
send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
- // Make sure it errors as expected given a too-large amount.
- if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, payment_id) {
- assert!(err.contains("over total_payment_amt_msat"));
- } else { panic!("Unexpected error"); }
-
- // Make sure it errors as expected given the wrong payment_id.
- if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, PaymentId([0; 32])) {
- assert!(err.contains("not found"));
- } else { panic!("Unexpected error"); }
-
// Retry the second half of the payment and make sure it succeeds.
- let mut path = route.clone();
- path.paths.remove(0);
- nodes[0].node.retry_payment(&path, payment_id).unwrap();
+ route.paths.remove(0);
+ route_params.final_value_msat = 1_000_000;
+ route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
+ nodes[0].router.expect_find_route(route_params, Ok(route));
+ nodes[0].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
assert_eq!(events.len(), 2);
// Pass half of the payment along the first path.
- let (node_1_msgs, mut events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &events);
+ let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
if send_partial_mpp {
expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
} else {
// Pass half of the payment along the second path.
- let (node_2_msgs, _events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events);
+ let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None);
// Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
do_mpp_receive_timeout(false);
}
-#[test]
-fn retry_expired_payment() {
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
- let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1);
- let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1);
- // Rebalance to find a route
- send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
-
- let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
-
- // Rebalance so that the first hop fails.
- send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);
-
- // Make sure the payment fails on the first hop.
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let mut payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
- check_added_monitors!(nodes[1], 0);
- commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
- let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- assert!(htlc_updates.update_add_htlcs.is_empty());
- assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
- assert!(htlc_updates.update_fulfill_htlcs.is_empty());
- assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
- check_added_monitors!(nodes[1], 1);
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
- expect_payment_failed!(nodes[0], payment_hash, false);
-
- // Mine blocks so the payment will have expired.
- connect_blocks(&nodes[0], 3);
-
- // Retry the payment and make sure it errors as expected.
- if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, PaymentId(payment_hash.0)) {
- assert!(err.contains("not found"));
- } else {
- panic!("Unexpected error");
- }
-}
-
#[test]
fn no_pending_leak_on_initial_send_failure() {
// In an earlier version of our payment tracking, we'd have a retry entry even when the initial
let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
true, APIError::ChannelUnavailable { ref err },
- assert_eq!(err, "Peer for first hop currently disconnected/pending monitor update!"));
+ assert_eq!(err, "Peer for first hop currently disconnected"));
assert!(!nodes[0].node.has_pending_payments());
}
// Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
// out and retry.
- let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+ let amt_msat = 1_000_000;
+ let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ let route_params = RouteParameters {
+ payment_params: route.payment_params.clone().unwrap(),
+ final_value_msat: amt_msat,
+ };
+ nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
// We relay the payment to nodes[1] while its disconnected from nodes[2], causing the payment
// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
// which would prevent retry.
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
assert_eq!(as_broadcasted_txn.len(), 1);
assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
// error, as the channel has hit the chain.
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
let as_err = nodes[0].node.get_and_clear_pending_msg_events();
confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
}
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
- expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
// Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
// reloaded) via a route over the new channel, which work without issue and eventually be
nodes[1].node.timer_tick_occurred();
}
- assert!(nodes[0].node.retry_payment(&new_route, payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
- nodes[0].node.retry_payment(&new_route, PaymentId(payment_hash.0)).unwrap();
+ assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
+ nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
assert!(nodes[0].node.has_pending_payments());
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
// error, as the channel has hit the chain.
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
let as_err = nodes[0].node.get_and_clear_pending_msg_events();
// If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
// confirming, we will fail as it's considered still-pending...
let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
- assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+ match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+ Err(PaymentSendFailure::DuplicatePayment) => {},
+ _ => panic!("Unexpected error")
+ }
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
// After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
// (which should also still work).
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- // We set mpp_parts_remain to avoid having abandon_payment called
- expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
nodes_0_serialized = nodes[0].node.encode();
- assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
+ // After the payment failed, we're free to send it again.
+ assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
// the payment is not (spuriously) listed as still pending.
- assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
+ assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
check_added_monitors!(nodes[0], 1);
pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
- assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+ match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+ Err(PaymentSendFailure::DuplicatePayment) => {},
+ _ => panic!("Unexpected error")
+ }
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
nodes_0_serialized = nodes[0].node.encode();
- // Ensure that after reload we cannot retry the payment.
+ // Check that after reload we can send the payment again (though we shouldn't, since it was
+ // claimed previously).
reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
- assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+ match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+ Err(PaymentSendFailure::DuplicatePayment) => {},
+ _ => panic!("Unexpected error")
+ }
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
// Now reload nodes[1]...
reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
nodes[1].node.fail_htlc_backwards(&payment_hash);
let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
.with_features(nodes[1].node.invoice_features());
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let route = get_route(
},
_ => panic!(),
};
+ assert!(!nodes[0].node.has_pending_payments());
}
#[test]
},
_ => panic!(),
};
+ assert!(!nodes[0].node.has_pending_payments());
}
#[test]
}
}
assert!(found_probe_failed);
+ assert!(!nodes[0].node.has_pending_payments());
}
#[test]
nodes[1].node.fail_htlc_backwards(&first_payment_hash);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
- pass_failed_payment_back_no_abandon(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
- check_send_rejected!();
-
- // Until we abandon the payment, no matter how many timer ticks pass, we still cannot reuse the
+ // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the
// PaymentId.
for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
nodes[0].node.timer_tick_occurred();
}
check_send_rejected!();
- nodes[0].node.abandon_payment(payment_id);
- get_event!(nodes[0], Event::PaymentFailed);
+ pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
- // However, we can reuse the PaymentId immediately after we `abandon_payment`.
+ // However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the
+ // failed payment back.
nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
check_added_monitors!(nodes[0], 1);
pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
// Send and claim the payment. Inflight HTLCs should be empty.
- let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 500000);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
- pass_along_route(&nodes[0], &[&vec!(&nodes[1], &nodes[2])[..]], 500000, payment_hash, payment_secret);
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], payment_preimage);
+ let payment_hash = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000).1;
+ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
{
- let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut node_1_per_peer_lock;
- let mut node_1_peer_state_lock;
let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
- let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
channel_1.get_short_channel_id().unwrap()
);
+ assert_eq!(chan_1_used_liquidity, None);
+ }
+ {
+ let mut node_1_per_peer_lock;
+ let mut node_1_peer_state_lock;
+ let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
channel_2.get_short_channel_id().unwrap()
);
- assert_eq!(chan_1_used_liquidity, None);
assert_eq!(chan_2_used_liquidity, None);
}
let pending_payments = nodes[0].node.list_recent_payments();
}
// Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
+ let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
+ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
{
- let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut node_1_per_peer_lock;
- let mut node_1_peer_state_lock;
let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
- let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
channel_1.get_short_channel_id().unwrap()
);
+ // First hop accounts for expected 1000 msat fee
+ assert_eq!(chan_1_used_liquidity, Some(501000));
+ }
+ {
+ let mut node_1_per_peer_lock;
+ let mut node_1_peer_state_lock;
+ let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
channel_2.get_short_channel_id().unwrap()
);
- // First hop accounts for expected 1000 msat fee
- assert_eq!(chan_1_used_liquidity, Some(501000));
assert_eq!(chan_2_used_liquidity, Some(500000));
}
let pending_payments = nodes[0].node.list_recent_payments();
nodes[0].node.timer_tick_occurred();
}
+ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
{
- let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut node_1_per_peer_lock;
- let mut node_1_peer_state_lock;
let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
- let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
channel_1.get_short_channel_id().unwrap()
);
+ assert_eq!(chan_1_used_liquidity, None);
+ }
+ {
+ let mut node_1_per_peer_lock;
+ let mut node_1_peer_state_lock;
+ let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
channel_2.get_short_channel_id().unwrap()
);
-
- assert_eq!(chan_1_used_liquidity, None);
assert_eq!(chan_2_used_liquidity, None);
}
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
let _ = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let route = get_route(
&nodes[0].node.get_our_node_id(), &route_params.payment_params,
&nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
- route_params.final_cltv_expiry_delta, nodes[0].logger, &scorer, &random_seed_bytes
+ route_params.payment_params.final_cltv_expiry_delta, nodes[0].logger, &scorer,
+ &random_seed_bytes,
).unwrap();
let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
FailAttempts,
FailTimeout,
FailOnRestart,
+ FailOnRetry,
}
#[test]
do_automatic_retries(AutoRetry::FailAttempts);
do_automatic_retries(AutoRetry::FailTimeout);
do_automatic_retries(AutoRetry::FailOnRestart);
+ do_automatic_retries(AutoRetry::FailOnRetry);
}
fn do_automatic_retries(test: AutoRetry) {
// Test basic automatic payment retries in ChannelManager. See individual `test` variant comments
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
check_added_monitors!(&nodes[1], 1);
assert!(update_1.update_fail_htlcs.len() == 1);
let fail_msg = update_1.update_fail_htlcs[0].clone();
-
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false);
// Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry
let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
assert_eq!(payment_hash, ev_payment_hash);
_ => panic!("Unexpected event"),
}
if $expect_pending_htlcs_forwardable {
- assert_eq!(events.len(), 2);
match events[1] {
Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
}
- } else { assert_eq!(events.len(), 1) }
+ } else {
+ match events[1] {
+ Event::PaymentFailed { payment_hash: ev_payment_hash, .. } => {
+ assert_eq!(payment_hash, ev_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
}
}
nodes[0].node.process_pending_htlc_forwards();
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 0);
-
- nodes[0].node.abandon_payment(PaymentId(payment_hash.0));
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
- assert_eq!(payment_hash, *ev_payment_hash);
- assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
- },
- _ => panic!("Unexpected event"),
- }
} else if test == AutoRetry::FailTimeout {
#[cfg(not(feature = "no-std"))] {
// Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 0);
- nodes[0].node.abandon_payment(PaymentId(payment_hash.0));
let mut events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
let chan_1_monitor_serialized = get_monitor!(nodes[0], channel_id_1).encode();
reload_node!(nodes[0], node_encoded, &[&chan_1_monitor_serialized], persister, new_chain_monitor, node_0_deserialized);
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ expect_pending_htlcs_forwardable_from_events!(nodes[0], events, true);
// Make sure we don't retry again.
+ let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 0);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+ assert_eq!(payment_hash, *ev_payment_hash);
+ assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else if test == AutoRetry::FailOnRetry {
+ nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+ pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
+
+ // We retry payments in `process_pending_htlc_forwards`. Since our channel closed, we should
+ // fail to find a route.
nodes[0].node.process_pending_htlc_forwards();
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 0);
- nodes[0].node.abandon_payment(PaymentId(payment_hash.0));
let mut events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
// Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
payment_params: Some(route_params.payment_params.clone()),
};
nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
+ let mut payment_params = route_params.payment_params.clone();
+ payment_params.previously_failed_channels.push(chan_2_id);
nodes[0].router.expect_find_route(RouteParameters {
- payment_params: route_params.payment_params.clone(),
- final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV
+ payment_params, final_value_msat: amt_msat / 2,
}, Ok(retry_1_route));
+ let mut payment_params = route_params.payment_params.clone();
+ payment_params.previously_failed_channels.push(chan_3_id);
nodes[0].router.expect_find_route(RouteParameters {
- payment_params: route_params.payment_params.clone(),
- final_value_msat: amt_msat / 4, final_cltv_expiry_delta: TEST_FINAL_CLTV
+ payment_params, final_value_msat: amt_msat / 4,
}, Ok(retry_2_route));
// Send a payment that will partially fail on send, then partially fail on retry, then succeed.
nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(closed_chan_events.len(), 2);
+ assert_eq!(closed_chan_events.len(), 4);
match closed_chan_events[0] {
Event::ChannelClosed { .. } => {},
_ => panic!("Unexpected event"),
}
match closed_chan_events[1] {
+ Event::PaymentPathFailed { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ match closed_chan_events[2] {
Event::ChannelClosed { .. } => {},
_ => panic!("Unexpected event"),
}
+ match closed_chan_events[3] {
+ Event::PaymentPathFailed { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
// Pass the first part of the payment along the path.
check_added_monitors!(nodes[0], 5); // three outbound channel updates succeeded, two permanently failed
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
- let err = nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap_err();
- if let PaymentSendFailure::AllFailedResendSafe(_) = err {
- } else { panic!("Unexpected error"); }
+ nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
- assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 1); // channel close event
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 3);
+ if let Event::ChannelClosed { .. } = events[0] { } else { panic!(); }
+ if let Event::PaymentPathFailed { .. } = events[1] { } else { panic!(); }
+ if let Event::PaymentFailed { .. } = events[2] { } else { panic!(); }
check_added_monitors!(nodes[0], 2);
}
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
let route_params = RouteParameters {
payment_params: payment_params.clone(),
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let chans = nodes[0].node.list_usable_channels();
// On retry, split the payment across both channels.
route.paths[0][0].fee_msat = 50_000_001;
route.paths[1][0].fee_msat = 50_000_000;
+ let mut pay_params = route.payment_params.clone().unwrap();
+ pay_params.previously_failed_channels.push(chans[1].short_channel_id.unwrap());
nodes[0].router.expect_find_route(RouteParameters {
- payment_params: route.payment_params.clone().unwrap(),
+ payment_params: pay_params,
// Note that the second request here requests the amount we originally failed to send,
// not the amount remaining on the full payment, which should be changed.
- final_value_msat: 100_000_001, final_cltv_expiry_delta: TEST_FINAL_CLTV
+ final_value_msat: 100_000_001,
}, Ok(route.clone()));
+ {
+ let scorer = chanmon_cfgs[0].scorer.lock().unwrap();
+ // The initial send attempt, 2 paths
+ scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
+ scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
+ // The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in.
+ scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity: EffectiveCapacity::Unknown });
+ scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
+ }
+
nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
+ failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
+ short_channel_id: Some(expected_scid), .. } =>
+ {
+ assert_eq!(payment_hash, ev_payment_hash);
+ assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
+ assert!(err_msg.contains("max HTLC"));
+ },
+ _ => panic!("Unexpected event"),
+ }
let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(htlc_msgs.len(), 2);
check_added_monitors!(nodes[0], 2);
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let chans = nodes[0].node.list_usable_channels();
route.paths[0][0].short_channel_id = chans[1].short_channel_id.unwrap();
route.paths[0][0].fee_msat = 50_000_000;
route.paths[1][0].fee_msat = 50_000_001;
+ let mut pay_params = route_params.payment_params.clone();
+ pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
nodes[0].router.expect_find_route(RouteParameters {
- payment_params: route_params.payment_params.clone(),
- final_value_msat: amt_msat, final_cltv_expiry_delta: TEST_FINAL_CLTV
+ payment_params: pay_params, final_value_msat: amt_msat,
}, Ok(route.clone()));
nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
+ failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
+ short_channel_id: Some(expected_scid), .. } =>
+ {
+ assert_eq!(payment_hash, ev_payment_hash);
+ assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
+ assert!(err_msg.contains("max HTLC"));
+ },
+ _ => panic!("Unexpected event"),
+ }
let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(htlc_msgs.len(), 2);
check_added_monitors!(nodes[0], 2);
#[test]
fn no_extra_retries_on_back_to_back_fail() {
// In a previous release, we had a race where we may exceed the payment retry count if we
- // get two failures in a row with the second having `all_paths_failed` set.
+ // get two failures in a row with the second indicating that all paths had failed (this field,
+ // `all_paths_failed`, has since been removed).
// Generally, when we give up trying to retry a payment, we don't know for sure what the
// current state of the ChannelManager event queue is. Specifically, we cannot be sure that
// there are not multiple additional `PaymentPathFailed` or even `PaymentSent` events
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let mut route = Route {
route.paths[0][1].fee_msat = amt_msat;
nodes[0].router.expect_find_route(RouteParameters {
payment_params: second_payment_params,
- final_value_msat: amt_msat, final_cltv_expiry_delta: TEST_FINAL_CLTV,
+ final_value_msat: amt_msat,
}, Ok(route.clone()));
nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
// by adding the `PaymentFailed` event.
//
// Because we now retry payments as a batch, we simply return a single-path route in the
- // second, batched, request, have that fail, then complete the payment via `abandon_payment`.
+ // second, batched, request, have that fail, ensure the payment was abandoned.
let mut events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 4);
+ assert_eq!(events.len(), 3);
match events[0] {
Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
assert_eq!(payment_hash, ev_payment_hash);
},
_ => panic!("Unexpected event"),
}
- match events[3] {
- Event::PendingHTLCsForwardable { .. } => {},
- _ => panic!("Unexpected event"),
- }
nodes[0].node.process_pending_htlc_forwards();
let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true);
let mut events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
assert_eq!(payment_hash, ev_payment_hash);
},
_ => panic!("Unexpected event"),
}
- nodes[0].node.abandon_payment(PaymentId(payment_hash.0));
- events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
+ match events[1] {
Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
assert_eq!(payment_hash, *ev_payment_hash);
assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
let route_params = RouteParameters {
payment_params,
final_value_msat: amt_msat,
- final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let mut route = Route {
route.paths.remove(0);
nodes[0].router.expect_find_route(RouteParameters {
payment_params: second_payment_params,
- final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV,
+ final_value_msat: amt_msat / 2,
}, Ok(route.clone()));
nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
expect_pending_htlcs_forwardable!(nodes[2]);
expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat);
}
+
+#[test]
+#[cfg(feature = "std")]
+fn test_threaded_payment_retries() {
+ // In the first version of the in-`ChannelManager` payment retries, retries weren't limited to
+ // a single thread and would happily let multiple threads run retries at the same time. Because
+ // retries are done by first calculating the amount we need to retry, then dropping the
+ // relevant lock, then actually sending, we would happily let multiple threads retry the same
+ // amount at the same time, overpaying our original HTLC!
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ // There is one mitigating guardrail when retrying payments - we can never over-pay by more
+ // than 10% of the original value. Thus, we want all our retries to be below that. In order to
+ // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest
+ // out over channel 3+4. This will let us ignore 99.9% of the payment value and deal with only
+ // our channel.
+ let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 10_000_000, 0);
+ let chan_3_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0).0.contents.short_channel_id;
+ let chan_4_scid = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0).0.contents.short_channel_id;
+
+ let amt_msat = 100_000_000;
+ let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
+ // NOTE(review): the `not(std)` branch below is unreachable - this test is already gated on
+ // `#[cfg(feature = "std")]` above; the cfg pair is presumably kept for copy-paste symmetry
+ // with other tests.
+ #[cfg(feature = "std")]
+ let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+ #[cfg(not(feature = "std"))]
+ let payment_expiry_secs = 60 * 60;
+ let mut invoice_features = InvoiceFeatures::empty();
+ invoice_features.set_variable_length_onion_required();
+ invoice_features.set_payment_secret_required();
+ invoice_features.set_basic_mpp_optional();
+ let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_expiry_time(payment_expiry_secs as u64)
+ .with_features(invoice_features);
+ let mut route_params = RouteParameters {
+ payment_params,
+ final_value_msat: amt_msat,
+ };
+
+ let mut route = Route {
+ paths: vec![
+ vec![RouteHop {
+ pubkey: nodes[1].node.get_our_node_id(),
+ node_features: nodes[1].node.node_features(),
+ short_channel_id: chan_1_scid,
+ channel_features: nodes[1].node.channel_features(),
+ fee_msat: 0,
+ cltv_expiry_delta: 100,
+ }, RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ // NOTE(review): features here come from nodes[2] while the pubkey is nodes[3];
+ // presumably irrelevant since this hop fails as an unknown SCID - confirm.
+ node_features: nodes[2].node.node_features(),
+ short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown
+ channel_features: nodes[2].node.channel_features(),
+ fee_msat: amt_msat / 1000,
+ cltv_expiry_delta: 100,
+ }],
+ vec![RouteHop {
+ pubkey: nodes[2].node.get_our_node_id(),
+ node_features: nodes[2].node.node_features(),
+ short_channel_id: chan_3_scid,
+ channel_features: nodes[2].node.channel_features(),
+ fee_msat: 100_000,
+ cltv_expiry_delta: 100,
+ }, RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ node_features: nodes[3].node.node_features(),
+ short_channel_id: chan_4_scid,
+ channel_features: nodes[3].node.channel_features(),
+ fee_msat: amt_msat - amt_msat / 1000,
+ cltv_expiry_delta: 100,
+ }]
+ ],
+ payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+ };
+ nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+
+ nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+ let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(send_msg_events.len(), 2);
+ send_msg_events.retain(|msg|
+ if let MessageSendEvent::UpdateHTLCs { node_id, .. } = msg {
+ // Drop the commitment update for nodes[2], we can just let that one sit pending
+ // forever.
+ *node_id == nodes[1].node.get_our_node_id()
+ } else { panic!(); }
+ );
+
+ // from here on out, the retry `RouteParameters` amount will be amt/1000
+ route_params.final_value_msat /= 1000;
+ route.paths.pop();
+
+ let end_time = Instant::now() + Duration::from_secs(1);
+ macro_rules! thread_body { () => { {
+ // We really want std::thread::scope, but it's not stable until 1.63. Until then, we get unsafe.
+ let node_ref = NodePtr::from_node(&nodes[0]);
+ move || {
+ let node_a = unsafe { &*node_ref.0 };
+ while Instant::now() < end_time {
+ node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
+ // Ignore if we have any pending events, just always pretend we just got a
+ // PendingHTLCsForwardable
+ node_a.node.process_pending_htlc_forwards();
+ }
+ }
+ } } }
+ let mut threads = Vec::new();
+ for _ in 0..16 { threads.push(std::thread::spawn(thread_body!())); }
+
+ // Back in the main thread, poll pending messages and make sure that we never have more than
+ // one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if
+ // there are HTLC messages shoved in while it's running. This allows us to test that we never
+ // generate an additional update_add_htlc until we've fully failed the first.
+ let mut previously_failed_channels = Vec::new();
+ loop {
+ assert_eq!(send_msg_events.len(), 1);
+ let send_event = SendEvent::from_event(send_msg_events.pop().unwrap());
+ assert_eq!(send_event.msgs.len(), 1);
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+
+ // Note that we only push one route into `expect_find_route` at a time, because that's all
+ // the retries (should) need. If the bug is reintroduced "real" routes may be selected, but
+ // we should still ultimately fail for the same reason - because we're trying to send too
+ // many HTLCs at once.
+ let mut new_route_params = route_params.clone();
+ previously_failed_channels.push(route.paths[0][1].short_channel_id);
+ new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone();
+ route.paths[0][1].short_channel_id += 1;
+ nodes[0].router.expect_find_route(new_route_params, Ok(route.clone()));
+
+ let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
+ // The "normal" commitment_signed_dance delivers the final RAA and then calls
+ // `check_added_monitors` to ensure only the one RAA-generated monitor update was created.
+ // This races with our other threads which may generate an add-HTLCs commitment update via
+ // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after
+ // *we've* called `process_pending_htlc_forwards` when it's guaranteed to have two updates.
+ let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true);
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &last_raa);
+
+ let cur_time = Instant::now();
+ if cur_time > end_time {
+ for thread in threads.drain(..) { thread.join().unwrap(); }
+ }
+
+ // Make sure we have some events to handle when we go around...
+ nodes[0].node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
+ nodes[0].node.process_pending_htlc_forwards();
+ send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[0], 2);
+
+ if cur_time > end_time {
+ break;
+ }
+ }
+}
+
+fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
+ // Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still
+ // receive the PaymentSent event even if the ChannelManager had no idea about the payment when
+ // it was last persisted.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let (persister_a, persister_b, persister_c);
+ let (chain_monitor_a, chain_monitor_b, chain_monitor_c);
+ let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+ // Serializing before the payment exists means the reloaded ChannelManager must learn of the
+ // payment solely from the ChannelMonitor.
+ let mut nodes_0_serialized = Vec::new();
+ if !persist_manager_with_payment {
+ nodes_0_serialized = nodes[0].node.encode();
+ }
+
+ let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ // Serializing after ensures the pending payment is included in the serialized manager.
+ if persist_manager_with_payment {
+ nodes_0_serialized = nodes[0].node.encode();
+ }
+
+ nodes[1].node.claim_funds(our_payment_preimage);
+ check_added_monitors!(nodes[1], 1);
+ expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+
+ // The ChannelMonitor should always be the latest version, as we're required to persist it
+ // during the commitment signed handling.
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized);
+
+ // The stale manager plus up-to-date monitor forces a channel close, but the PaymentSent event
+ // must still surface on reload.
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); }
+ if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
+ // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
+ // the double-claim that would otherwise appear at the end of this test.
+ let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_broadcasted_txn.len(), 1);
+
+ // Ensure that, even after some time, if we restart we still include *something* in the current
+ // `ChannelManager` which prevents a `PaymentFailed` when we restart even if pending resolved
+ // payments have since been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`.
+ // A naive implementation of the fix here would wipe the pending payments set, causing a
+ // failure event when we restart.
+ for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+
+ // Ensure that we don't generate any further events even after the channel-closing commitment
+ // transaction is confirmed on-chain.
+ confirm_transaction(&nodes[0], &as_broadcasted_txn[0]);
+ for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+}
+
+#[test]
+fn no_missing_sent_on_midpoint_reload() {
+ // Exercise both variants: manager persisted before and after the payment was initiated.
+ do_no_missing_sent_on_midpoint_reload(false);
+ do_no_missing_sent_on_midpoint_reload(true);
+}
use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
use bitcoin::hashes::{HashEngine, Hash};
-/// Handler for BOLT1-compliant messages.
+/// A handler provided to [`PeerManager`] for reading and handling custom messages.
+///
+/// [BOLT 1] specifies a custom message type range for use with experimental or application-specific
+/// messages. `CustomMessageHandler` allows for user-defined handling of such types. See the
+/// [`lightning_custom_message`] crate for tools useful in composing more than one custom handler.
+///
+/// [BOLT 1]: https://github.com/lightning/bolts/blob/master/01-messaging.md
+/// [`lightning_custom_message`]: https://docs.rs/lightning_custom_message/latest/lightning_custom_message
pub trait CustomMessageHandler: wire::CustomMessageReader {
- /// Called with the message type that was received and the buffer to be read.
- /// Can return a `MessageHandlingError` if the message could not be handled.
+ /// Handles the given message sent from `sender_node_id`, possibly producing messages for
+ /// [`CustomMessageHandler::get_and_clear_pending_msg`] to return and thus for [`PeerManager`]
+ /// to send.
fn handle_custom_message(&self, msg: Self::CustomMessage, sender_node_id: &PublicKey) -> Result<(), LightningError>;
- /// Gets the list of pending messages which were generated by the custom message
- /// handler, clearing the list in the process. The first tuple element must
- /// correspond to the intended recipients node ids. If no connection to one of the
- /// specified node does not exist, the message is simply not sent to it.
+ /// Returns the list of pending messages that were generated by the handler, clearing the list
+ /// in the process. Each message is paired with the node id of the intended recipient. If no
+ /// connection to the node exists, then the message is simply not sent.
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
}
fn get_next_channel_announcement(&self, _starting_point: u64) ->
Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<msgs::NodeAnnouncement> { None }
- fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
InitFeatures::empty()
}
+ fn processing_queue_high(&self) -> bool { false }
}
impl OnionMessageProvider for IgnoringMessageHandler {
fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
}
impl OnionMessageHandler for IgnoringMessageHandler {
fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
- fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
- fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
+ fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
InitFeatures::empty()
}
// msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
- fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
- fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+ fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
/// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
/// descriptor.
#[derive(Clone)]
-pub struct PeerHandleError {
- /// Used to indicate that we probably can't make any future connections to this peer (e.g.
- /// because we required features that our peer was missing, or vice versa).
- ///
- /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
- /// any channels with this peer or check for new versions of LDK.
- ///
- /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- pub no_connection_possible: bool,
-}
+pub struct PeerHandleError { }
impl fmt::Debug for PeerHandleError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
formatter.write_str("Peer Sent Invalid Data")
/// We cache a `NodeId` here to avoid serializing peers' keys every time we forward gossip
/// messages in `PeerManager`. Use `Peer::set_their_node_id` to modify this field.
their_node_id: Option<(PublicKey, NodeId)>,
+ /// The features provided in the peer's [`msgs::Init`] message.
+ ///
+ /// This is set only after we've processed the [`msgs::Init`] message and called relevant
+ /// `peer_connected` handler methods. Thus, this field is set *iff* we've finished our
+ /// handshake and can talk to this peer normally (though use [`Peer::handshake_complete`] to
+ /// check this).
their_features: Option<InitFeatures>,
their_net_address: Option<NetAddress>,
awaiting_pong_timer_tick_intervals: i8,
received_message_since_timer_tick: bool,
sent_gossip_timestamp_filter: bool,
+
+ /// Indicates we've received a `channel_announcement` since the last time we had
+ /// [`PeerManager::gossip_processing_backlogged`] set (or, really, that we've received a
+ /// `channel_announcement` at all - we set this unconditionally but unset it every time we
+ /// check if we're gossip-processing-backlogged).
+ received_channel_announce_since_backlogged: bool,
+
+ inbound_connection: bool,
}
impl Peer {
+ /// True after we've processed the [`msgs::Init`] message and called relevant `peer_connected`
+ /// handler methods. Thus, this implies we've finished our handshake and can talk to this peer
+ /// normally.
+ fn handshake_complete(&self) -> bool {
+ self.their_features.is_some()
+ }
+
/// Returns true if the channel announcements/updates for the given channel should be
/// forwarded to this peer.
/// If we are sending our routing table to this peer and we have not yet sent channel
/// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
/// sent the old versions, we should send the update, and so return true here.
fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
+ if !self.handshake_complete() { return false; }
if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
!self.sent_gossip_timestamp_filter {
return false;
/// Similar to the above, but for node announcements indexed by node_id.
fn should_forward_node_announcement(&self, node_id: NodeId) -> bool {
+ if !self.handshake_complete() { return false; }
if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
!self.sent_gossip_timestamp_filter {
return false;
/// Returns whether we should be reading bytes from this peer, based on whether its outbound
/// buffer still has space and we don't need to pause reads to get some writes out.
- fn should_read(&self) -> bool {
- self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE
+ fn should_read(&mut self, gossip_processing_backlogged: bool) -> bool {
+ if !gossip_processing_backlogged {
+ self.received_channel_announce_since_backlogged = false;
+ }
+ self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE &&
+ (!gossip_processing_backlogged || !self.received_channel_announce_since_backlogged)
}
/// Determines if we should push additional gossip background sync (aka "backfill") onto a peer's
fn should_buffer_gossip_backfill(&self) -> bool {
self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty()
&& self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ && self.handshake_complete()
}
/// Determines if we should push an onion message onto a peer's outbound buffer. This is checked
/// every time the peer's buffer may have been drained.
fn should_buffer_onion_message(&self) -> bool {
- self.pending_outbound_buffer.is_empty()
+ self.pending_outbound_buffer.is_empty() && self.handshake_complete()
&& self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
}
/// Determines if we should push additional gossip broadcast messages onto a peer's outbound
/// buffer. This is checked every time the peer's buffer may have been drained.
fn should_buffer_gossip_broadcast(&self) -> bool {
- self.pending_outbound_buffer.is_empty()
+ self.pending_outbound_buffer.is_empty() && self.handshake_complete()
&& self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
}
peer_counter: AtomicCounter,
+ gossip_processing_backlogged: AtomicBool,
+ gossip_processing_backlog_lifted: AtomicBool,
+
node_signer: NS,
logger: L,
blocked_event_processors: AtomicBool::new(false),
ephemeral_key_midstate,
peer_counter: AtomicCounter::new(),
+ gossip_processing_backlogged: AtomicBool::new(false),
+ gossip_processing_backlog_lifted: AtomicBool::new(false),
last_node_announcement_serial: AtomicU32::new(current_time),
logger,
custom_message_handler,
}
}
- /// Get the list of node ids for peers which have completed the initial handshake.
+ /// Get a list of tuples mapping from node id to network addresses for peers which have
+ /// completed the initial handshake.
+ ///
+ /// For outbound connections, the [`PublicKey`] will be the same as the `their_node_id` parameter
+ /// passed in to [`Self::new_outbound_connection`], however entries will only appear once the initial
+ /// handshake has completed and we are sure the remote peer has the private key for the given
+ /// [`PublicKey`].
///
- /// For outbound connections, this will be the same as the their_node_id parameter passed in to
- /// new_outbound_connection, however entries will only appear once the initial handshake has
- /// completed and we are sure the remote peer has the private key for the given node_id.
- pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
+ /// The returned `Option`s will only be `Some` if an address had been previously given via
+ /// [`Self::new_outbound_connection`] or [`Self::new_inbound_connection`].
+ pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<NetAddress>)> {
let peers = self.peers.read().unwrap();
peers.values().filter_map(|peer_mutex| {
let p = peer_mutex.lock().unwrap();
- if !p.channel_encryptor.is_ready_for_encryption() || p.their_features.is_none() {
+ if !p.handshake_complete() {
return None;
}
- p.their_node_id
- }).map(|(node_id, _)| node_id).collect()
+ Some((p.their_node_id.unwrap().0, p.their_net_address.clone()))
+ }).collect()
}
fn get_ephemeral_key(&self) -> SecretKey {
SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
}
- /// Indicates a new outbound connection has been established to a node with the given node_id
+ /// Indicates a new outbound connection has been established to a node with the given `node_id`
/// and an optional remote network address.
///
/// The remote network address adds the option to report a remote IP address back to a connecting
let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
let mut peers = self.peers.write().unwrap();
- if peers.insert(descriptor, Mutex::new(Peer {
- channel_encryptor: peer_encryptor,
- their_node_id: None,
- their_features: None,
- their_net_address: remote_network_address,
-
- pending_outbound_buffer: LinkedList::new(),
- pending_outbound_buffer_first_msg_offset: 0,
- gossip_broadcast_buffer: LinkedList::new(),
- awaiting_write_event: false,
-
- pending_read_buffer,
- pending_read_buffer_pos: 0,
- pending_read_is_header: false,
-
- sync_status: InitSyncTracker::NoSyncRequested,
-
- msgs_sent_since_pong: 0,
- awaiting_pong_timer_tick_intervals: 0,
- received_message_since_timer_tick: false,
- sent_gossip_timestamp_filter: false,
- })).is_some() {
- panic!("PeerManager driver duplicated descriptors!");
- };
- Ok(res)
+ match peers.entry(descriptor) {
+ hash_map::Entry::Occupied(_) => {
+ debug_assert!(false, "PeerManager driver duplicated descriptors!");
+ Err(PeerHandleError {})
+ },
+ hash_map::Entry::Vacant(e) => {
+ e.insert(Mutex::new(Peer {
+ channel_encryptor: peer_encryptor,
+ their_node_id: None,
+ their_features: None,
+ their_net_address: remote_network_address,
+
+ pending_outbound_buffer: LinkedList::new(),
+ pending_outbound_buffer_first_msg_offset: 0,
+ gossip_broadcast_buffer: LinkedList::new(),
+ awaiting_write_event: false,
+
+ pending_read_buffer,
+ pending_read_buffer_pos: 0,
+ pending_read_is_header: false,
+
+ sync_status: InitSyncTracker::NoSyncRequested,
+
+ msgs_sent_since_pong: 0,
+ awaiting_pong_timer_tick_intervals: 0,
+ received_message_since_timer_tick: false,
+ sent_gossip_timestamp_filter: false,
+
+ received_channel_announce_since_backlogged: false,
+ inbound_connection: false,
+ }));
+ Ok(res)
+ }
+ }
}
/// Indicates a new inbound connection has been established to a node with an optional remote
let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
let mut peers = self.peers.write().unwrap();
- if peers.insert(descriptor, Mutex::new(Peer {
- channel_encryptor: peer_encryptor,
- their_node_id: None,
- their_features: None,
- their_net_address: remote_network_address,
-
- pending_outbound_buffer: LinkedList::new(),
- pending_outbound_buffer_first_msg_offset: 0,
- gossip_broadcast_buffer: LinkedList::new(),
- awaiting_write_event: false,
-
- pending_read_buffer,
- pending_read_buffer_pos: 0,
- pending_read_is_header: false,
-
- sync_status: InitSyncTracker::NoSyncRequested,
-
- msgs_sent_since_pong: 0,
- awaiting_pong_timer_tick_intervals: 0,
- received_message_since_timer_tick: false,
- sent_gossip_timestamp_filter: false,
- })).is_some() {
- panic!("PeerManager driver duplicated descriptors!");
- };
- Ok(())
+ match peers.entry(descriptor) {
+ hash_map::Entry::Occupied(_) => {
+ debug_assert!(false, "PeerManager driver duplicated descriptors!");
+ Err(PeerHandleError {})
+ },
+ hash_map::Entry::Vacant(e) => {
+ e.insert(Mutex::new(Peer {
+ channel_encryptor: peer_encryptor,
+ their_node_id: None,
+ their_features: None,
+ their_net_address: remote_network_address,
+
+ pending_outbound_buffer: LinkedList::new(),
+ pending_outbound_buffer_first_msg_offset: 0,
+ gossip_broadcast_buffer: LinkedList::new(),
+ awaiting_write_event: false,
+
+ pending_read_buffer,
+ pending_read_buffer_pos: 0,
+ pending_read_is_header: false,
+
+ sync_status: InitSyncTracker::NoSyncRequested,
+
+ msgs_sent_since_pong: 0,
+ awaiting_pong_timer_tick_intervals: 0,
+ received_message_since_timer_tick: false,
+ sent_gossip_timestamp_filter: false,
+
+ received_channel_announce_since_backlogged: false,
+ inbound_connection: true,
+ }));
+ Ok(())
+ }
+ }
+ }
+
+ fn peer_should_read(&self, peer: &mut Peer) -> bool {
+ peer.should_read(self.gossip_processing_backlogged.load(Ordering::Relaxed))
+ }
+
+ fn update_gossip_backlogged(&self) {
+ let new_state = self.message_handler.route_handler.processing_queue_high();
+ let prev_state = self.gossip_processing_backlogged.swap(new_state, Ordering::Relaxed);
+ if prev_state && !new_state {
+ self.gossip_processing_backlog_lifted.store(true, Ordering::Relaxed);
+ }
}
- fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
+ fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer, force_one_write: bool) {
+ let mut have_written = false;
while !peer.awaiting_write_event {
if peer.should_buffer_onion_message() {
if let Some((peer_node_id, _)) = peer.their_node_id {
self.maybe_send_extra_ping(peer);
}
+ let should_read = self.peer_should_read(peer);
let next_buff = match peer.pending_outbound_buffer.front() {
- None => return,
+ None => {
+ if force_one_write && !have_written {
+ if should_read {
+ let data_sent = descriptor.send_data(&[], should_read);
+ debug_assert_eq!(data_sent, 0, "Can't write more than no data");
+ }
+ }
+ return
+ },
Some(buff) => buff,
};
let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
- let data_sent = descriptor.send_data(pending, peer.should_read());
+ let data_sent = descriptor.send_data(pending, should_read);
+ have_written = true;
peer.pending_outbound_buffer_first_msg_offset += data_sent;
if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() {
peer.pending_outbound_buffer_first_msg_offset = 0;
// This is most likely a simple race condition where the user found that the socket
// was writeable, then we told the user to `disconnect_socket()`, then they called
// this method. Return an error to make sure we get disconnected.
- return Err(PeerHandleError { no_connection_possible: false });
+ return Err(PeerHandleError { });
},
Some(peer_mutex) => {
let mut peer = peer_mutex.lock().unwrap();
peer.awaiting_write_event = false;
- self.do_attempt_write_data(descriptor, &mut peer);
+ self.do_attempt_write_data(descriptor, &mut peer, false);
}
};
Ok(())
/// [`send_data`] call on this descriptor has `resume_read` set (preventing DoS issues in the
/// send buffer).
///
+ /// In order to avoid processing too many messages at once per peer, `data` should be on the
+ /// order of 4KiB.
+ ///
/// [`send_data`]: SocketDescriptor::send_data
/// [`process_events`]: PeerManager::process_events
pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result<bool, PeerHandleError> {
Ok(res) => Ok(res),
Err(e) => {
log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error");
- self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
+ self.disconnect_event_internal(peer_descriptor);
Err(e)
}
}
// This is most likely a simple race condition where the user read some bytes
// from the socket, then we told the user to `disconnect_socket()`, then they
// called this method. Return an error to make sure we get disconnected.
- return Err(PeerHandleError { no_connection_possible: false });
+ return Err(PeerHandleError { });
},
Some(peer_mutex) => {
let mut read_pos = 0;
msgs::ErrorAction::DisconnectPeer { msg: _ } => {
//TODO: Try to push msg
log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
- return Err(PeerHandleError{ no_connection_possible: false });
+ return Err(PeerHandleError { });
},
msgs::ErrorAction::IgnoreAndLog(level) => {
log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
macro_rules! insert_node_id {
() => {
match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
- hash_map::Entry::Occupied(_) => {
+ hash_map::Entry::Occupied(e) => {
log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
- return Err(PeerHandleError{ no_connection_possible: false })
+ // Check that the peers map is consistent with the
+ // node_id_to_descriptor map, as this has been broken
+ // before.
+ debug_assert!(peers.get(e.get()).is_some());
+ return Err(PeerHandleError { })
},
hash_map::Entry::Vacant(entry) => {
log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
if msg_len < 2 { // Need at least the message type tag
- return Err(PeerHandleError{ no_connection_possible: false });
+ return Err(PeerHandleError { });
}
peer.pending_read_is_header = false;
} else {
(msgs::DecodeError::UnknownRequiredFeature, ty) => {
log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
- return Err(PeerHandleError { no_connection_possible: false });
+ return Err(PeerHandleError { });
}
- (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
+ (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
(msgs::DecodeError::InvalidValue, _) => {
log_debug!(self.logger, "Got an invalid value while deserializing message");
- return Err(PeerHandleError { no_connection_possible: false });
+ return Err(PeerHandleError { });
}
(msgs::DecodeError::ShortRead, _) => {
log_debug!(self.logger, "Deserialization failed due to shortness of message");
- return Err(PeerHandleError { no_connection_possible: false });
+ return Err(PeerHandleError { });
}
- (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
- (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
+ (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
+ (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }),
}
}
};
}
}
}
- pause_read = !peer.should_read();
+ pause_read = !self.peer_should_read(peer);
if let Some(message) = msg_to_handle {
match self.handle_message(&peer_mutex, peer_lock, message) {
if let wire::Message::Init(msg) = message {
if msg.features.requires_unknown_bits() {
log_debug!(self.logger, "Peer features required unknown version bits");
- return Err(PeerHandleError{ no_connection_possible: true }.into());
+ return Err(PeerHandleError { }.into());
}
if peer_lock.their_features.is_some() {
- return Err(PeerHandleError{ no_connection_possible: false }.into());
+ return Err(PeerHandleError { }.into());
}
log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
}
- if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg) {
+ if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
- return Err(PeerHandleError{ no_connection_possible: true }.into());
+ return Err(PeerHandleError { }.into());
}
- if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg) {
+ if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
- return Err(PeerHandleError{ no_connection_possible: true }.into());
+ return Err(PeerHandleError { }.into());
}
- if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg) {
+ if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
- return Err(PeerHandleError{ no_connection_possible: true }.into());
+ return Err(PeerHandleError { }.into());
}
peer_lock.their_features = Some(msg.features);
return Ok(None);
} else if peer_lock.their_features.is_none() {
log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
- return Err(PeerHandleError{ no_connection_possible: false }.into());
+ return Err(PeerHandleError { }.into());
}
if let wire::Message::GossipTimestampFilter(_msg) = message {
return Ok(None);
}
+ if let wire::Message::ChannelAnnouncement(ref _msg) = message {
+ peer_lock.received_channel_announce_since_backlogged = true;
+ }
+
mem::drop(peer_lock);
if is_gossip_msg(message.type_id()) {
}
self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
if msg.channel_id == [0; 32] {
- return Err(PeerHandleError{ no_connection_possible: true }.into());
+ return Err(PeerHandleError { }.into());
}
},
wire::Message::Warning(msg) => {
.map_err(|e| -> MessageHandlingError { e.into() })? {
should_forward = Some(wire::Message::ChannelAnnouncement(msg));
}
+ self.update_gossip_backlogged();
},
wire::Message::NodeAnnouncement(msg) => {
if self.message_handler.route_handler.handle_node_announcement(&msg)
.map_err(|e| -> MessageHandlingError { e.into() })? {
should_forward = Some(wire::Message::NodeAnnouncement(msg));
}
+ self.update_gossip_backlogged();
},
wire::Message::ChannelUpdate(msg) => {
self.message_handler.chan_handler.handle_channel_update(&their_node_id, &msg);
.map_err(|e| -> MessageHandlingError { e.into() })? {
should_forward = Some(wire::Message::ChannelUpdate(msg));
}
+ self.update_gossip_backlogged();
},
wire::Message::QueryShortChannelIds(msg) => {
self.message_handler.route_handler.handle_query_short_channel_ids(&their_node_id, msg)?;
// Unknown messages:
wire::Message::Unknown(type_id) if message.is_even() => {
log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
- // Fail the channel if message is an even, unknown type as per BOLT #1.
- return Err(PeerHandleError{ no_connection_possible: true }.into());
+ return Err(PeerHandleError { }.into());
},
wire::Message::Unknown(type_id) => {
log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
for (_, peer_mutex) in peers.iter() {
let mut peer = peer_mutex.lock().unwrap();
- if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+ if !peer.handshake_complete() ||
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
+ debug_assert!(peer.their_node_id.is_some());
+ debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
for (_, peer_mutex) in peers.iter() {
let mut peer = peer_mutex.lock().unwrap();
- if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+ if !peer.handshake_complete() ||
!peer.should_forward_node_announcement(msg.contents.node_id) {
continue
}
+ debug_assert!(peer.their_node_id.is_some());
+ debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
for (_, peer_mutex) in peers.iter() {
let mut peer = peer_mutex.lock().unwrap();
- if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+ if !peer.handshake_complete() ||
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
+ debug_assert!(peer.their_node_id.is_some());
+ debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
}
+ self.update_gossip_backlogged();
+ let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
+
let mut peers_to_disconnect = HashMap::new();
let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
Some(descriptor) => match peers.get(&descriptor) {
Some(peer_mutex) => {
let peer_lock = peer_mutex.lock().unwrap();
- if peer_lock.their_features.is_none() {
+ if !peer_lock.handshake_complete() {
continue;
}
peer_lock
self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None),
_ => {},
}
- match self.message_handler.route_handler.handle_channel_update(&update_msg) {
- Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
- self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None),
- _ => {},
+ if let Some(msg) = update_msg {
+ match self.message_handler.route_handler.handle_channel_update(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
+ _ => {},
+ }
}
},
MessageSendEvent::BroadcastChannelUpdate { msg } => {
_ => {},
}
},
+ MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
+ log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler for node {}", msg.contents.node_id);
+ match self.message_handler.route_handler.handle_node_announcement(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None),
+ _ => {},
+ }
+ },
MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
log_pubkey!(node_id), msg.contents.short_channel_id);
}
for (descriptor, peer_mutex) in peers.iter() {
- self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+ let mut peer = peer_mutex.lock().unwrap();
+ if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
+ self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer, flush_read_disabled);
}
}
if !peers_to_disconnect.is_empty() {
// thread can be holding the peer lock if we have the global write
// lock).
- if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+ let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+ if let Some(mut descriptor) = descriptor_opt {
if let Some(peer_mutex) = peers.remove(&descriptor) {
+ let mut peer = peer_mutex.lock().unwrap();
if let Some(msg) = msg {
log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
- let mut peer = peer_mutex.lock().unwrap();
self.enqueue_message(&mut *peer, &msg);
// This isn't guaranteed to work, but if there is enough free
// room in the send buffer, put the error message there...
- self.do_attempt_write_data(&mut descriptor, &mut *peer);
- } else {
- log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
+ self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
}
- }
- descriptor.disconnect_socket();
- self.message_handler.chan_handler.peer_disconnected(&node_id, false);
- self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
+ self.do_disconnect(descriptor, &*peer, "DisconnectPeer HandleError");
+ } else { debug_assert!(false, "Missing connection for peer"); }
}
}
}
/// Indicates that the given socket descriptor's connection is now closed.
pub fn socket_disconnected(&self, descriptor: &Descriptor) {
- self.disconnect_event_internal(descriptor, false);
+ self.disconnect_event_internal(descriptor);
}
- fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
+ fn do_disconnect(&self, mut descriptor: Descriptor, peer: &Peer, reason: &'static str) {
+ if !peer.handshake_complete() {
+ log_trace!(self.logger, "Disconnecting peer which hasn't completed handshake due to {}", reason);
+ descriptor.disconnect_socket();
+ return;
+ }
+
+ debug_assert!(peer.their_node_id.is_some());
+ if let Some((node_id, _)) = peer.their_node_id {
+ log_trace!(self.logger, "Disconnecting peer with id {} due to {}", node_id, reason);
+ self.message_handler.chan_handler.peer_disconnected(&node_id);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id);
+ }
+ descriptor.disconnect_socket();
+ }
+
+ fn disconnect_event_internal(&self, descriptor: &Descriptor) {
let mut peers = self.peers.write().unwrap();
let peer_option = peers.remove(descriptor);
match peer_option {
Some(peer_lock) => {
let peer = peer_lock.lock().unwrap();
if let Some((node_id, _)) = peer.their_node_id {
- log_trace!(self.logger,
- "Handling disconnection of peer {}, with {}future connection to the peer possible.",
- log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
- self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
- self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
- self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
+ log_trace!(self.logger, "Handling disconnection of peer {}", log_pubkey!(node_id));
+ let removed = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+ debug_assert!(removed.is_some(), "descriptor maps should be consistent");
+ if !peer.handshake_complete() { return; }
+ self.message_handler.chan_handler.peer_disconnected(&node_id);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id);
}
}
};
/// Disconnect a peer given its node id.
///
- /// Set `no_connection_possible` to true to prevent any further connection with this peer,
- /// force-closing any channels we have with it.
- ///
/// If a peer is connected, this will call [`disconnect_socket`] on the descriptor for the
/// peer. Thus, be very careful about reentrancy issues.
///
/// [`disconnect_socket`]: SocketDescriptor::disconnect_socket
- pub fn disconnect_by_node_id(&self, node_id: PublicKey, no_connection_possible: bool) {
+ pub fn disconnect_by_node_id(&self, node_id: PublicKey) {
let mut peers_lock = self.peers.write().unwrap();
- if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
- log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
- peers_lock.remove(&descriptor);
- self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
- self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
- descriptor.disconnect_socket();
+ if let Some(descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+ let peer_opt = peers_lock.remove(&descriptor);
+ if let Some(peer_mutex) = peer_opt {
+ self.do_disconnect(descriptor, &*peer_mutex.lock().unwrap(), "client request");
+ } else { debug_assert!(false, "node_id_to_descriptor thought we had a peer"); }
}
}
let mut peers_lock = self.peers.write().unwrap();
self.node_id_to_descriptor.lock().unwrap().clear();
let peers = &mut *peers_lock;
- for (mut descriptor, peer) in peers.drain() {
- if let Some((node_id, _)) = peer.lock().unwrap().their_node_id {
- log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
- self.message_handler.chan_handler.peer_disconnected(&node_id, false);
- self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
- }
- descriptor.disconnect_socket();
+ for (descriptor, peer_mutex) in peers.drain() {
+ self.do_disconnect(descriptor, &*peer_mutex.lock().unwrap(), "client request to disconnect all peers");
}
}
{
let peers_lock = self.peers.read().unwrap();
+ self.update_gossip_backlogged();
+ let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
+
for (descriptor, peer_mutex) in peers_lock.iter() {
let mut peer = peer_mutex.lock().unwrap();
- if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() {
+ if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
+
+ if !peer.handshake_complete() {
// The peer needs to complete its handshake before we can exchange messages. We
// give peers one timer tick to complete handshake, reusing
// `awaiting_pong_timer_tick_intervals` to track number of timer ticks taken
}
continue;
}
+ debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+ debug_assert!(peer.their_node_id.is_some());
- if peer.awaiting_pong_timer_tick_intervals == -1 {
- // Magic value set in `maybe_send_extra_ping`.
- peer.awaiting_pong_timer_tick_intervals = 1;
+ loop { // Used as a `goto` to skip writing a Ping message.
+ if peer.awaiting_pong_timer_tick_intervals == -1 {
+ // Magic value set in `maybe_send_extra_ping`.
+ peer.awaiting_pong_timer_tick_intervals = 1;
+ peer.received_message_since_timer_tick = false;
+ break;
+ }
+
+ if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
+ || peer.awaiting_pong_timer_tick_intervals as u64 >
+ MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64
+ {
+ descriptors_needing_disconnect.push(descriptor.clone());
+ break;
+ }
peer.received_message_since_timer_tick = false;
- continue;
- }
- if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
- || peer.awaiting_pong_timer_tick_intervals as u64 >
- MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64
- {
- descriptors_needing_disconnect.push(descriptor.clone());
- continue;
- }
- peer.received_message_since_timer_tick = false;
+ if peer.awaiting_pong_timer_tick_intervals > 0 {
+ peer.awaiting_pong_timer_tick_intervals += 1;
+ break;
+ }
- if peer.awaiting_pong_timer_tick_intervals > 0 {
- peer.awaiting_pong_timer_tick_intervals += 1;
- continue;
+ peer.awaiting_pong_timer_tick_intervals = 1;
+ let ping = msgs::Ping {
+ ponglen: 0,
+ byteslen: 64,
+ };
+ self.enqueue_message(&mut *peer, &ping);
+ break;
}
-
- peer.awaiting_pong_timer_tick_intervals = 1;
- let ping = msgs::Ping {
- ponglen: 0,
- byteslen: 64,
- };
- self.enqueue_message(&mut *peer, &ping);
- self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer);
+ self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer, flush_read_disabled);
}
}
if !descriptors_needing_disconnect.is_empty() {
{
let mut peers_lock = self.peers.write().unwrap();
- for descriptor in descriptors_needing_disconnect.iter() {
- if let Some(peer) = peers_lock.remove(descriptor) {
- if let Some((node_id, _)) = peer.lock().unwrap().their_node_id {
- log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
+ for descriptor in descriptors_needing_disconnect {
+ if let Some(peer_mutex) = peers_lock.remove(&descriptor) {
+ let peer = peer_mutex.lock().unwrap();
+ if let Some((node_id, _)) = peer.their_node_id {
self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
- self.message_handler.chan_handler.peer_disconnected(&node_id, false);
- self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
}
+ self.do_disconnect(descriptor, &*peer, "ping timeout");
}
}
}
-
- for mut descriptor in descriptors_needing_disconnect.drain(..) {
- descriptor.disconnect_socket();
- }
}
}
#[cfg(test)]
mod tests {
use crate::chain::keysinterface::{NodeSigner, Recipient};
+ use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
use crate::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
use crate::ln::{msgs, wire};
use crate::ln::msgs::NetAddress;
use crate::prelude::*;
use crate::sync::{Arc, Mutex};
- use core::sync::atomic::Ordering;
+ use core::sync::atomic::{AtomicBool, Ordering};
#[derive(Clone)]
struct FileDescriptor {
fd: u16,
outbound_data: Arc<Mutex<Vec<u8>>>,
+ disconnect: Arc<AtomicBool>,
}
impl PartialEq for FileDescriptor {
fn eq(&self, other: &Self) -> bool {
data.len()
}
- fn disconnect_socket(&mut self) {}
+ fn disconnect_socket(&mut self) { self.disconnect.store(true, Ordering::Release); }
}
struct PeerManagerCfg {
}
fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>) -> (FileDescriptor, FileDescriptor) {
- let a_id = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
- let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
- let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
- let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
- peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
+ let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
+ let mut fd_a = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+ let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap();
+ let mut fd_b = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+ let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
+ peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
peer_a.process_events();
let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+ assert!(peer_a.get_peer_node_ids().contains(&(id_b, Some(addr_b))));
+ assert!(peer_b.get_peer_node_ids().contains(&(id_a, Some(addr_a))));
+
(fd_a.clone(), fd_b.clone())
}
+ #[test]
+ #[cfg(feature = "std")]
+ fn fuzz_threaded_connections() {
+ // Spawn two threads which repeatedly connect two peers together, leading to "got second
+ // connection with peer" disconnections and rapid reconnect. This previously found an issue
+ // with our internal map consistency, and is a generally good smoke test of disconnection.
+ let cfgs = Arc::new(create_peermgr_cfgs(2));
+ // Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }.
+ let peers = Arc::new(create_network(2, unsafe { &*(&*cfgs as *const _) as &'static _ }));
+
+ let start_time = std::time::Instant::now();
+ macro_rules! spawn_thread { ($id: expr) => { {
+ let peers = Arc::clone(&peers);
+ let cfgs = Arc::clone(&cfgs);
+ std::thread::spawn(move || {
+ let mut ctr = 0;
+ while start_time.elapsed() < std::time::Duration::from_secs(1) {
+ let id_a = peers[0].node_signer.get_node_id(Recipient::Node).unwrap();
+ let mut fd_a = FileDescriptor {
+ fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+ let mut fd_b = FileDescriptor {
+ fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+ let initial_data = peers[1].new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
+ peers[0].new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
+ if peers[0].read_event(&mut fd_a, &initial_data).is_err() { break; }
+
+ while start_time.elapsed() < std::time::Duration::from_secs(1) {
+ peers[0].process_events();
+ if fd_a.disconnect.load(Ordering::Acquire) { break; }
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ if peers[1].read_event(&mut fd_b, &a_data).is_err() { break; }
+
+ peers[1].process_events();
+ if fd_b.disconnect.load(Ordering::Acquire) { break; }
+ let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+ if peers[0].read_event(&mut fd_a, &b_data).is_err() { break; }
+
+ cfgs[0].chan_handler.pending_events.lock().unwrap()
+ .push(crate::util::events::MessageSendEvent::SendShutdown {
+ node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(),
+ msg: msgs::Shutdown {
+ channel_id: [0; 32],
+ scriptpubkey: bitcoin::Script::new(),
+ },
+ });
+ cfgs[1].chan_handler.pending_events.lock().unwrap()
+ .push(crate::util::events::MessageSendEvent::SendShutdown {
+ node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(),
+ msg: msgs::Shutdown {
+ channel_id: [0; 32],
+ scriptpubkey: bitcoin::Script::new(),
+ },
+ });
+
+ if ctr % 2 == 0 {
+ peers[0].timer_tick_occurred();
+ peers[1].timer_tick_occurred();
+ }
+ }
+
+ peers[0].socket_disconnected(&fd_a);
+ peers[1].socket_disconnected(&fd_b);
+ ctr += 1;
+ std::thread::sleep(std::time::Duration::from_micros(1));
+ }
+ })
+ } } }
+ let thrd_a = spawn_thread!(1);
+ let thrd_b = spawn_thread!(2);
+
+ thrd_a.join().unwrap();
+ thrd_b.join().unwrap();
+ }
+
#[test]
fn test_disconnect_peer() {
// Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
// push a DisconnectPeer event to remove the node flagged by id
let cfgs = create_peermgr_cfgs(2);
- let chan_handler = test_utils::TestChannelMessageHandler::new();
- let mut peers = create_network(2, &cfgs);
+ let peers = create_network(2, &cfgs);
establish_connection(&peers[0], &peers[1]);
assert_eq!(peers[0].peers.read().unwrap().len(), 1);
let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap();
-
- chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
+ cfgs[0].chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
node_id: their_id,
action: msgs::ErrorAction::DisconnectPeer { msg: None },
});
- assert_eq!(chan_handler.pending_events.lock().unwrap().len(), 1);
- peers[0].message_handler.chan_handler = &chan_handler;
peers[0].process_events();
assert_eq!(peers[0].peers.read().unwrap().len(), 0);
assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
}
+ #[test]
+ fn test_non_init_first_msg() {
+ // Simple test of the first message received over a connection being something other than
+ // Init. This results in an immediate disconnection, which previously included a spurious
+ // peer_disconnected event handed to event handlers (which would panic in
+ // `TestChannelMessageHandler` here).
+ let cfgs = create_peermgr_cfgs(2);
+ let peers = create_network(2, &cfgs);
+
+ let mut fd_dup = FileDescriptor {
+ fd: 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let addr_dup = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1003};
+ let id_a = cfgs[0].node_signer.get_node_id(Recipient::Node).unwrap();
+ peers[0].new_inbound_connection(fd_dup.clone(), Some(addr_dup.clone())).unwrap();
+
+ let mut dup_encryptor = PeerChannelEncryptor::new_outbound(id_a, SecretKey::from_slice(&[42; 32]).unwrap());
+ let initial_data = dup_encryptor.get_act_one(&peers[1].secp_ctx);
+ assert_eq!(peers[0].read_event(&mut fd_dup, &initial_data).unwrap(), false);
+ peers[0].process_events();
+
+ let a_data = fd_dup.outbound_data.lock().unwrap().split_off(0);
+ let (act_three, _) =
+ dup_encryptor.process_act_two(&a_data[..], &&cfgs[1].node_signer).unwrap();
+ assert_eq!(peers[0].read_event(&mut fd_dup, &act_three).unwrap(), false);
+
+ let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 };
+ let msg_bytes = dup_encryptor.encrypt_message(¬_init_msg);
+ assert!(peers[0].read_event(&mut fd_dup, &msg_bytes).is_err());
+ }
+
#[test]
fn test_disconnect_all_peer() {
// Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
let peers = create_network(2, &cfgs);
let a_id = peers[0].node_signer.get_node_id(Recipient::Node).unwrap();
- let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
- let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+ let mut fd_a = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let mut fd_b = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
peers[0].new_inbound_connection(fd_a.clone(), None).unwrap();
// Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
// to true. Sadly there is currently no way to change it at runtime.
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
let nodes_1_serialized = nodes[1].node.encode();
let monitor_a_serialized = get_monitor!(nodes[1], chan_id_1).encode();
no_announce_cfg.accept_forwards_to_priv_channels = true;
reload_node!(nodes[1], no_announce_cfg, &nodes_1_serialized, &[&monitor_a_serialized, &monitor_b_serialized], persister, new_chain_monitor, nodes_1_deserialized);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
- nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, false).unwrap();
let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[2]).pop().unwrap();
let cs_reestablish = get_chan_reestablish_msgs!(nodes[2], nodes[1]).pop().unwrap();
nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
msg.clone()
} else { panic!("Unexpected event"); };
let (bs_announcement, bs_update) = if let MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } = bs_announce_events[1] {
- (msg.clone(), update_msg.clone())
+ (msg.clone(), update_msg.clone().unwrap())
} else { panic!("Unexpected event"); };
nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
let as_announce_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(as_announce_events.len(), 1);
let (announcement, as_update) = if let MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } = as_announce_events[0] {
- (msg.clone(), update_msg.clone())
+ (msg.clone(), update_msg.clone().unwrap())
} else { panic!("Unexpected event"); };
assert_eq!(announcement, bs_announcement);
check_added_monitors!(nodes[0], 1);
pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
+
+ as_channel_ready.short_channel_id_alias = Some(0xeadbeef);
+ nodes[2].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &as_channel_ready);
+ // Note that we always respond to a channel_ready with a channel_update. Not a lot of reason
+ // to bother updating that code, so just drop the message here.
+ get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
// Now test that if a peer sends us a second channel_ready after the channel is operational we
assert_eq!(open_channel.channel_flags & 1, 1); // The `announce_channel` bit is set.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
- let err = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+ let err = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
assert_eq!(err.data, "SCID Alias/Privacy Channel Type cannot be set on a public channel");
}
nodes[0].node.force_close_all_channels_broadcasting_latest_txn();
check_added_monitors!(nodes[0], 1);
check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
- let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());
+ let _ = get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
}
#[test]
match bs_announcement[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
announcement = msg.clone();
- bs_update = update_msg.clone();
+ bs_update = update_msg.clone().unwrap();
},
_ => panic!("Unexpected event"),
};
match as_announcement[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
assert!(announcement == *msg);
+ let update_msg = update_msg.as_ref().unwrap();
assert_eq!(update_msg.contents.short_channel_id, scid);
assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
confirm_transaction(&nodes[0], &tx);
let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
confirm_transaction(&nodes[1], &tx);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert!(events_2.is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
// nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
assert_eq!(events_7.len(), 1);
let (chan_announcement, as_update) = match events_7[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
- (msg.clone(), update_msg.clone())
+ (msg.clone(), update_msg.clone().unwrap())
},
_ => panic!("Unexpected event {:?}", events_7[0]),
};
let bs_update = match events_8[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
assert_eq!(*msg, chan_announcement);
- update_msg.clone()
+ update_msg.clone().unwrap()
},
_ => panic!("Unexpected event {:?}", events_8[0]),
};
// Check that after deserialization and reconnection we can still generate an identical
// channel_announcement from the cached signatures.
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
let chan_0_monitor_serialized =
get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// After deserializing, make sure the funding_transaction is still held by the channel manager
let events_4 = nodes[0].node.get_and_clear_pending_events();
// Make sure the channel is functioning as though the de/serialization never happened
assert_eq!(nodes[0].node.list_channels().len(), 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
let nodes_0_serialized = nodes[0].node.encode();
route_payment(&nodes[0], &[&nodes[3]], 1000000);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
// nodes[3])
//... and we can even still claim the payment!
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
- nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
- nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
let mut found_err = false;
for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
if reconnect_panicing {
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
// after the warning message sent by B, we should not be able to
// use the channel, or successfully reconnect to the channel.
assert!(nodes[0].node.list_usable_channels().is_empty());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
// Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(send_events.len(), 2);
- let (node_1_msgs, mut send_events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &send_events);
- let (node_2_msgs, _send_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &send_events);
+ let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut send_events);
+ let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut send_events);
do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None);
do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None);
assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
- nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
// During deserialization, we should have closed one channel and broadcast its latest
// commitment transaction. We should also still have the original PaymentClaimable event we
if !persist_both_monitors {
// If one of the two channels is still live, reveal the payment preimage over it.
- nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
- nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_commitment_tx.len(), 1);
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
if use_cs_commitment {
// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
use crate::chain::transaction::OutPoint;
use crate::chain::Confirm;
use crate::ln::channelmanager::ChannelManager;
-use crate::ln::msgs::ChannelMessageHandler;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::ln::msgs::{ChannelMessageHandler, Init};
+use crate::util::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::test_utils;
use crate::util::ser::Writeable;
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
// Now check that we can create a new channel
+ if reload_node && nodes[0].node.per_peer_state.read().unwrap().len() == 0 {
+ // If we dropped the channel before reloading the node, nodes[1] was also dropped from
+ // nodes[0] storage, and hence not connected again on startup. We therefore need to
+ // reconnect to the node before attempting to create a new channel.
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ }
create_announced_chan_between_nodes(&nodes, 0, 1);
send_payment(&nodes[0], &[&nodes[1]], 8000000);
}
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
let logger = test_utils::TestLogger::new();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
}
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
let node_0_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
let node_1_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
assert!(node_0_2nd_closing_signed.is_some());
}
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
let node_1_2nd_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, false).unwrap();
if recv_count == 0 {
// If all closing_signeds weren't delivered we can just resume where we left off...
let node_0_2nd_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
//! The payment recipient must include a [`PaymentHash`], so as to reveal the preimage upon payment
//! receipt, and one or more [`BlindedPath`]s for the payer to use when sending the payment.
//!
-//! ```ignore
+//! ```
//! extern crate bitcoin;
//! extern crate lightning;
//!
//!
//! // Invoice for the "offer to be paid" flow.
//! InvoiceRequest::try_from(bytes)?
-//! .respond_with(payment_paths, payment_hash)?
+#![cfg_attr(feature = "std", doc = "
+ .respond_with(payment_paths, payment_hash)?
+")]
+#![cfg_attr(not(feature = "std"), doc = "
+ .respond_with_no_std(payment_paths, payment_hash, core::time::Duration::from_secs(0))?
+")]
//! .relative_expiry(3600)
//! .allow_mpp()
//! .fallback_v0_p2wpkh(&wpubkey_hash)
//! // Invoice for the "offer for money" flow.
//! "lnr1qcp4256ypq"
//! .parse::<Refund>()?
-//! .respond_with(payment_paths, payment_hash, pubkey)?
+#![cfg_attr(feature = "std", doc = "
+ .respond_with(payment_paths, payment_hash, pubkey)?
+")]
+#![cfg_attr(not(feature = "std"), doc = "
+ .respond_with_no_std(payment_paths, payment_hash, pubkey, core::time::Duration::from_secs(0))?
+")]
//! .relative_expiry(3600)
//! .allow_mpp()
//! .fallback_v0_p2wpkh(&wpubkey_hash)
Some(amount_msats) => amount_msats,
None => match invoice_request.contents.offer.amount() {
Some(Amount::Bitcoin { amount_msats }) => {
- amount_msats * invoice_request.quantity().unwrap_or(1)
+ amount_msats.checked_mul(invoice_request.quantity().unwrap_or(1))
+ .ok_or(SemanticError::InvalidAmount)?
},
Some(Amount::Currency { .. }) => return Err(SemanticError::UnsupportedCurrency),
None => return Err(SemanticError::MissingAmount),
}
impl<'a> UnsignedInvoice<'a> {
+ /// The public key corresponding to the key needed to sign the invoice.
+ pub fn signing_pubkey(&self) -> PublicKey {
+ self.invoice.fields().signing_pubkey
+ }
+
/// Signs the invoice using the given function.
pub fn sign<F, E>(self, sign: F) -> Result<Invoice, SignError<E>>
where
/// [`Offer`]: crate::offers::offer::Offer
/// [`Refund`]: crate::offers::refund::Refund
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+#[derive(Clone, Debug, PartialEq)]
pub struct Invoice {
bytes: Vec<u8>,
contents: InvoiceContents,
///
/// [`Offer`]: crate::offers::offer::Offer
/// [`Refund`]: crate::offers::refund::Refund
+#[derive(Clone, Debug, PartialEq)]
enum InvoiceContents {
/// Contents for an [`Invoice`] corresponding to an [`Offer`].
///
}
/// Invoice-specific fields for an `invoice` message.
+#[derive(Clone, Debug, PartialEq)]
struct InvoiceFields {
payment_paths: Vec<(BlindedPath, BlindedPayInfo)>,
created_at: Duration,
&self.contents.fields().features
}
- /// The public key used to sign invoices.
+ /// The public key corresponding to the key used to sign the invoice.
pub fn signing_pubkey(&self) -> PublicKey {
self.contents.fields().signing_pubkey
}
- /// Signature of the invoice using [`Invoice::signing_pubkey`].
+ /// Signature of the invoice verified using [`Invoice::signing_pubkey`].
pub fn signature(&self) -> Signature {
self.signature
}
/// Information needed to route a payment across a [`BlindedPath`].
#[derive(Clone, Debug, PartialEq)]
pub struct BlindedPayInfo {
- fee_base_msat: u32,
- fee_proportional_millionths: u32,
- cltv_expiry_delta: u16,
- htlc_minimum_msat: u64,
- htlc_maximum_msat: u64,
- features: BlindedHopFeatures,
+ /// Base fee charged (in millisatoshi) for the entire blinded path.
+ pub fee_base_msat: u32,
+
+ /// Liquidity fee charged (in millionths of the amount transferred) for the entire blinded path
+ /// (i.e., 10,000 is 1%).
+ pub fee_proportional_millionths: u32,
+
+ /// Number of blocks subtracted from an incoming HTLC's `cltv_expiry` for the entire blinded
+ /// path.
+ pub cltv_expiry_delta: u16,
+
+ /// The minimum HTLC value (in millisatoshi) that is acceptable to all channel peers on the
+ /// blinded path from the introduction node to the recipient, accounting for any fees, i.e., as
+ /// seen by the recipient.
+ pub htlc_minimum_msat: u64,
+
+ /// The maximum HTLC value (in millisatoshi) that is acceptable to all channel peers on the
+ /// blinded path from the introduction node to the recipient, accounting for any fees, i.e., as
+ /// seen by the recipient.
+ pub htlc_maximum_msat: u64,
+
+ /// Features set in `encrypted_data_tlv` for the `encrypted_recipient_data` TLV record in an
+ /// onion payload.
+ pub features: BlindedHopFeatures,
}
impl_writeable!(BlindedPayInfo, {
use crate::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures};
use crate::offers::invoice_request::InvoiceRequestTlvStreamRef;
use crate::offers::merkle::{SignError, SignatureTlvStreamRef, self};
- use crate::offers::offer::{OfferBuilder, OfferTlvStreamRef};
+ use crate::offers::offer::{OfferBuilder, OfferTlvStreamRef, Quantity};
use crate::offers::parse::{ParseError, SemanticError};
use crate::offers::payer::PayerTlvStreamRef;
use crate::offers::refund::RefundBuilder;
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths.clone(), payment_hash, now).unwrap()
+ .respond_with_no_std(payment_paths.clone(), payment_hash, now).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
let now = now();
let invoice = RefundBuilder::new("foo".into(), vec![1; 32], payer_pubkey(), 1000).unwrap()
.build().unwrap()
- .respond_with(payment_paths.clone(), payment_hash, recipient_pubkey(), now).unwrap()
+ .respond_with_no_std(payment_paths.clone(), payment_hash, recipient_pubkey(), now)
+ .unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
}
}
+ #[cfg(feature = "std")]
+ #[test]
+ fn builds_invoice_from_offer_with_expiration() {
+ let future_expiry = Duration::from_secs(u64::max_value());
+ let past_expiry = Duration::from_secs(0);
+
+ if let Err(e) = OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .absolute_expiry(future_expiry)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .build().unwrap()
+ .sign(payer_sign).unwrap()
+ .respond_with(payment_paths(), payment_hash())
+ .unwrap()
+ .build()
+ {
+ panic!("error building invoice: {:?}", e);
+ }
+
+ match OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .absolute_expiry(past_expiry)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .build_unchecked()
+ .sign(payer_sign).unwrap()
+ .respond_with(payment_paths(), payment_hash())
+ .unwrap()
+ .build()
+ {
+ Ok(_) => panic!("expected error"),
+ Err(e) => assert_eq!(e, SemanticError::AlreadyExpired),
+ }
+ }
+
#[cfg(feature = "std")]
#[test]
fn builds_invoice_from_refund_with_expiration() {
if let Err(e) = RefundBuilder::new("foo".into(), vec![1; 32], payer_pubkey(), 1000).unwrap()
.absolute_expiry(future_expiry)
.build().unwrap()
- .respond_with(payment_paths(), payment_hash(), recipient_pubkey(), now()).unwrap()
+ .respond_with(payment_paths(), payment_hash(), recipient_pubkey())
+ .unwrap()
.build()
{
panic!("error building invoice: {:?}", e);
match RefundBuilder::new("foo".into(), vec![1; 32], payer_pubkey(), 1000).unwrap()
.absolute_expiry(past_expiry)
.build().unwrap()
- .respond_with(payment_paths(), payment_hash(), recipient_pubkey(), now()).unwrap()
+ .respond_with(payment_paths(), payment_hash(), recipient_pubkey())
+ .unwrap()
.build()
{
Ok(_) => panic!("expected error"),
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now).unwrap()
.relative_expiry(one_hour.as_secs() as u32)
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now - one_hour).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now - one_hour).unwrap()
.relative_expiry(one_hour.as_secs() as u32 - 1)
.build().unwrap()
.sign(recipient_sign).unwrap();
.amount_msats(1001).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
let (_, _, _, tlv_stream, _) = invoice.as_tlv_stream();
assert_eq!(tlv_stream.amount, Some(1001));
}
+ #[test]
+ fn builds_invoice_with_quantity_from_request() {
+ let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .supported_quantity(Quantity::Unbounded)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .quantity(2).unwrap()
+ .build().unwrap()
+ .sign(payer_sign).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+ .build().unwrap()
+ .sign(recipient_sign).unwrap();
+ let (_, _, _, tlv_stream, _) = invoice.as_tlv_stream();
+ assert_eq!(invoice.amount_msats(), 2000);
+ assert_eq!(tlv_stream.amount, Some(2000));
+
+ match OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .supported_quantity(Quantity::Unbounded)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .quantity(u64::max_value()).unwrap()
+ .build_unchecked()
+ .sign(payer_sign).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now())
+ {
+ Ok(_) => panic!("expected error"),
+ Err(e) => assert_eq!(e, SemanticError::InvalidAmount),
+ }
+ }
+
#[test]
fn builds_invoice_with_fallback_address() {
let script = Script::new();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.fallback_v0_p2wsh(&script.wscript_hash())
.fallback_v0_p2wpkh(&pubkey.wpubkey_hash().unwrap())
.fallback_v1_p2tr_tweaked(&tweaked_pubkey)
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.allow_mpp()
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(|_| Err(()))
{
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(payer_sign)
{
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.relative_expiry(3600)
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.allow_mpp()
.build().unwrap()
.sign(recipient_sign).unwrap();
.build().unwrap()
.sign(payer_sign).unwrap();
let mut unsigned_invoice = invoice_request
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.fallback_v0_p2wsh(&script.wscript_hash())
.fallback_v0_p2wpkh(&pubkey.wpubkey_hash().unwrap())
.fallback_v1_p2tr_tweaked(&tweaked_pubkey)
.build().unwrap();
// Only standard addresses will be included.
- let mut fallbacks = unsigned_invoice.invoice.fields_mut().fallbacks.as_mut().unwrap();
+ let fallbacks = unsigned_invoice.invoice.fields_mut().fallbacks.as_mut().unwrap();
// Non-standard addresses
fallbacks.push(FallbackAddress { version: 1, program: vec![0u8; 41] });
fallbacks.push(FallbackAddress { version: 2, program: vec![0u8; 1] });
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.invoice
.write(&mut buffer).unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
let last_signature_byte = invoice.bytes.last_mut().unwrap();
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap()
- .respond_with(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
.sign(recipient_sign).unwrap();
//! [`Invoice`]: crate::offers::invoice::Invoice
//! [`Refund`]: crate::offers::refund::Refund
//!
-//! ```ignore
+//! ```
//! extern crate bitcoin;
//! extern crate lightning;
//!
///
/// [`Invoice`]: crate::offers::invoice::Invoice
/// [`Offer`]: crate::offers::offer::Offer
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct InvoiceRequest {
pub(super) bytes: Vec<u8>,
pub(super) contents: InvoiceRequestContents,
/// The contents of an [`InvoiceRequest`], which may be shared with an [`Invoice`].
///
/// [`Invoice`]: crate::offers::invoice::Invoice
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub(super) struct InvoiceRequestContents {
payer: PayerContents,
pub(super) offer: OfferContents,
self.signature
}
+ /// Creates an [`Invoice`] for the request with the given required fields and using the
+ /// [`Duration`] since [`std::time::SystemTime::UNIX_EPOCH`] as the creation time.
+ ///
+	/// See [`InvoiceRequest::respond_with_no_std`] for further details, where the aforementioned
+ /// creation time is used for the `created_at` parameter.
+ ///
+ /// [`Invoice`]: crate::offers::invoice::Invoice
+ /// [`Duration`]: core::time::Duration
+ #[cfg(feature = "std")]
+ pub fn respond_with(
+ &self, payment_paths: Vec<(BlindedPath, BlindedPayInfo)>, payment_hash: PaymentHash
+ ) -> Result<InvoiceBuilder, SemanticError> {
+ let created_at = std::time::SystemTime::now()
+ .duration_since(std::time::SystemTime::UNIX_EPOCH)
+ .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+
+ self.respond_with_no_std(payment_paths, payment_hash, created_at)
+ }
+
/// Creates an [`Invoice`] for the request with the given required fields.
///
/// Unless [`InvoiceBuilder::relative_expiry`] is set, the invoice will expire two hours after
- /// calling this method in `std` builds. For `no-std` builds, a final [`Duration`] parameter
- /// must be given, which is used to set [`Invoice::created_at`] since [`std::time::SystemTime`]
- /// is not available.
+ /// `created_at`, which is used to set [`Invoice::created_at`]. Useful for `no-std` builds where
+ /// [`std::time::SystemTime`] is not available.
///
/// The caller is expected to remember the preimage of `payment_hash` in order to claim a payment
/// for the invoice.
///
/// Errors if the request contains unknown required features.
///
- /// [`Duration`]: core::time::Duration
/// [`Invoice`]: crate::offers::invoice::Invoice
/// [`Invoice::created_at`]: crate::offers::invoice::Invoice::created_at
- pub fn respond_with(
+ pub fn respond_with_no_std(
&self, payment_paths: Vec<(BlindedPath, BlindedPayInfo)>, payment_hash: PaymentHash,
- #[cfg(any(test, not(feature = "std")))]
created_at: core::time::Duration
) -> Result<InvoiceBuilder, SemanticError> {
if self.features().requires_unknown_bits() {
return Err(SemanticError::UnknownRequiredFeatures);
}
- #[cfg(all(not(test), feature = "std"))]
- let created_at = std::time::SystemTime::now()
- .duration_since(std::time::SystemTime::UNIX_EPOCH)
- .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
-
InvoiceBuilder::for_offer(self, payment_paths, created_at, payment_hash)
}
Ok(_) => panic!("expected error"),
Err(e) => assert_eq!(e, SemanticError::MissingAmount),
}
+
+ match OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .supported_quantity(Quantity::Unbounded)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .quantity(u64::max_value()).unwrap()
+ .build()
+ {
+ Ok(_) => panic!("expected error"),
+ Err(e) => assert_eq!(e, SemanticError::InvalidAmount),
+ }
}
#[test]
assert_eq!(e, ParseError::InvalidSemantics(SemanticError::UnsupportedCurrency));
},
}
+
+ let invoice_request = OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .supported_quantity(Quantity::Unbounded)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .quantity(u64::max_value()).unwrap()
+ .build_unchecked()
+ .sign(payer_sign).unwrap();
+
+ let mut buffer = Vec::new();
+ invoice_request.write(&mut buffer).unwrap();
+
+ match InvoiceRequest::try_from(buffer) {
+ Ok(_) => panic!("expected error"),
+ Err(e) => assert_eq!(e, ParseError::InvalidSemantics(SemanticError::InvalidAmount)),
+ }
}
#[test]
//! published as a QR code to be scanned by a customer. The customer uses the offer to request an
//! invoice from the merchant to be paid.
//!
-//! ```ignore
+//! ```
//! extern crate bitcoin;
//! extern crate core;
//! extern crate lightning;
///
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
/// [`Invoice`]: crate::offers::invoice::Invoice
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct Offer {
// The serialized offer. Needed when creating an `InvoiceRequest` if the offer contains unknown
// fields.
///
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
/// [`Invoice`]: crate::offers::invoice::Invoice
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub(super) struct OfferContents {
chains: Option<Vec<ChainHash>>,
metadata: Option<Vec<u8>>,
};
if !self.expects_quantity() || quantity.is_some() {
- let expected_amount_msats = offer_amount_msats * quantity.unwrap_or(1);
+ let expected_amount_msats = offer_amount_msats.checked_mul(quantity.unwrap_or(1))
+ .ok_or(SemanticError::InvalidAmount)?;
let amount_msats = amount_msats.unwrap_or(expected_amount_msats);
if amount_msats < expected_amount_msats {
//! Parsing and formatting for bech32 message encoding.
use bitcoin::bech32;
-use bitcoin::bech32::{FromBase32, ToBase32};
use bitcoin::secp256k1;
use core::convert::TryFrom;
-use core::fmt;
use crate::io;
use crate::ln::msgs::DecodeError;
use crate::util::ser::SeekReadable;
use crate::prelude::*;
-/// Indicates a message can be encoded using bech32.
-pub(super) trait Bech32Encode: AsRef<[u8]> + TryFrom<Vec<u8>, Error=ParseError> {
- /// Human readable part of the message's bech32 encoding.
- const BECH32_HRP: &'static str;
-
- /// Parses a bech32-encoded message into a TLV stream.
- fn from_bech32_str(s: &str) -> Result<Self, ParseError> {
- // Offer encoding may be split by '+' followed by optional whitespace.
- let encoded = match s.split('+').skip(1).next() {
- Some(_) => {
- for chunk in s.split('+') {
- let chunk = chunk.trim_start();
- if chunk.is_empty() || chunk.contains(char::is_whitespace) {
- return Err(ParseError::InvalidContinuation);
+#[cfg(not(fuzzing))]
+pub(super) use sealed::Bech32Encode;
+
+#[cfg(fuzzing)]
+pub use sealed::Bech32Encode;
+
+mod sealed {
+ use bitcoin::bech32;
+ use bitcoin::bech32::{FromBase32, ToBase32};
+ use core::convert::TryFrom;
+ use core::fmt;
+ use super::ParseError;
+
+ use crate::prelude::*;
+
+ /// Indicates a message can be encoded using bech32.
+ pub trait Bech32Encode: AsRef<[u8]> + TryFrom<Vec<u8>, Error=ParseError> {
+		/// Human-readable part of the message's bech32 encoding.
+ const BECH32_HRP: &'static str;
+
+ /// Parses a bech32-encoded message into a TLV stream.
+ fn from_bech32_str(s: &str) -> Result<Self, ParseError> {
+ // Offer encoding may be split by '+' followed by optional whitespace.
+ let encoded = match s.split('+').skip(1).next() {
+ Some(_) => {
+ for chunk in s.split('+') {
+ let chunk = chunk.trim_start();
+ if chunk.is_empty() || chunk.contains(char::is_whitespace) {
+ return Err(ParseError::InvalidContinuation);
+ }
}
- }
- let s = s.chars().filter(|c| *c != '+' && !c.is_whitespace()).collect::<String>();
- Bech32String::Owned(s)
- },
- None => Bech32String::Borrowed(s),
- };
+ let s: String = s.chars().filter(|c| *c != '+' && !c.is_whitespace()).collect();
+ Bech32String::Owned(s)
+ },
+ None => Bech32String::Borrowed(s),
+ };
- let (hrp, data) = bech32::decode_without_checksum(encoded.as_ref())?;
+ let (hrp, data) = bech32::decode_without_checksum(encoded.as_ref())?;
- if hrp != Self::BECH32_HRP {
- return Err(ParseError::InvalidBech32Hrp);
- }
+ if hrp != Self::BECH32_HRP {
+ return Err(ParseError::InvalidBech32Hrp);
+ }
- let data = Vec::<u8>::from_base32(&data)?;
- Self::try_from(data)
- }
+ let data = Vec::<u8>::from_base32(&data)?;
+ Self::try_from(data)
+ }
- /// Formats the message using bech32-encoding.
- fn fmt_bech32_str(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
- bech32::encode_without_checksum_to_fmt(f, Self::BECH32_HRP, self.as_ref().to_base32())
- .expect("HRP is invalid").unwrap();
+ /// Formats the message using bech32-encoding.
+ fn fmt_bech32_str(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ bech32::encode_without_checksum_to_fmt(f, Self::BECH32_HRP, self.as_ref().to_base32())
+ .expect("HRP is invalid").unwrap();
- Ok(())
+ Ok(())
+ }
}
-}
-// Used to avoid copying a bech32 string not containing the continuation character (+).
-enum Bech32String<'a> {
- Borrowed(&'a str),
- Owned(String),
-}
+ // Used to avoid copying a bech32 string not containing the continuation character (+).
+ enum Bech32String<'a> {
+ Borrowed(&'a str),
+ Owned(String),
+ }
-impl<'a> AsRef<str> for Bech32String<'a> {
- fn as_ref(&self) -> &str {
- match self {
- Bech32String::Borrowed(s) => s,
- Bech32String::Owned(s) => s,
+ impl<'a> AsRef<str> for Bech32String<'a> {
+ fn as_ref(&self) -> &str {
+ match self {
+ Bech32String::Borrowed(s) => s,
+ Bech32String::Owned(s) => s,
+ }
}
}
}
/// [`InvoiceRequest::payer_id`].
///
/// [`InvoiceRequest::payer_id`]: crate::offers::invoice_request::InvoiceRequest::payer_id
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub(super) struct PayerContents(pub Vec<u8>);
tlv_stream!(PayerTlvStream, PayerTlvStreamRef, 0..1, {
//! [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
//! [`Offer`]: crate::offers::offer::Offer
//!
-//! ```ignore
+//! ```
//! extern crate bitcoin;
//! extern crate core;
//! extern crate lightning;
///
/// [`Invoice`]: crate::offers::invoice::Invoice
/// [`Offer`]: crate::offers::offer::Offer
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub struct Refund {
pub(super) bytes: Vec<u8>,
pub(super) contents: RefundContents,
/// The contents of a [`Refund`], which may be shared with an [`Invoice`].
///
/// [`Invoice`]: crate::offers::invoice::Invoice
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
pub(super) struct RefundContents {
payer: PayerContents,
// offer fields
self.contents.payer_note.as_ref().map(|payer_note| PrintableString(payer_note.as_str()))
}
+ /// Creates an [`Invoice`] for the refund with the given required fields and using the
+ /// [`Duration`] since [`std::time::SystemTime::UNIX_EPOCH`] as the creation time.
+ ///
+	/// See [`Refund::respond_with_no_std`] for further details, where the aforementioned creation
+ /// time is used for the `created_at` parameter.
+ ///
+ /// [`Invoice`]: crate::offers::invoice::Invoice
+ /// [`Duration`]: core::time::Duration
+ #[cfg(feature = "std")]
+ pub fn respond_with(
+ &self, payment_paths: Vec<(BlindedPath, BlindedPayInfo)>, payment_hash: PaymentHash,
+ signing_pubkey: PublicKey,
+ ) -> Result<InvoiceBuilder, SemanticError> {
+ let created_at = std::time::SystemTime::now()
+ .duration_since(std::time::SystemTime::UNIX_EPOCH)
+ .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+
+ self.respond_with_no_std(payment_paths, payment_hash, signing_pubkey, created_at)
+ }
+
/// Creates an [`Invoice`] for the refund with the given required fields.
///
/// Unless [`InvoiceBuilder::relative_expiry`] is set, the invoice will expire two hours after
- /// calling this method in `std` builds. For `no-std` builds, a final [`Duration`] parameter
- /// must be given, which is used to set [`Invoice::created_at`] since [`std::time::SystemTime`]
- /// is not available.
+ /// `created_at`, which is used to set [`Invoice::created_at`]. Useful for `no-std` builds where
+ /// [`std::time::SystemTime`] is not available.
///
/// The caller is expected to remember the preimage of `payment_hash` in order to
/// claim a payment for the invoice.
///
/// [`Invoice`]: crate::offers::invoice::Invoice
/// [`Invoice::created_at`]: crate::offers::invoice::Invoice::created_at
- pub fn respond_with(
+ pub fn respond_with_no_std(
&self, payment_paths: Vec<(BlindedPath, BlindedPayInfo)>, payment_hash: PaymentHash,
- signing_pubkey: PublicKey,
- #[cfg(any(test, not(feature = "std")))]
- created_at: Duration
+ signing_pubkey: PublicKey, created_at: Duration
) -> Result<InvoiceBuilder, SemanticError> {
if self.features().requires_unknown_bits() {
return Err(SemanticError::UnknownRequiredFeatures);
}
- #[cfg(all(not(test), feature = "std"))]
- let created_at = std::time::SystemTime::now()
- .duration_since(std::time::SystemTime::UNIX_EPOCH)
- .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
-
InvoiceBuilder::for_refund(self, payment_paths, created_at, payment_hash, signing_pubkey)
}
use bitcoin::secp256k1::{PublicKey, Secp256k1};
use crate::io;
+use crate::io_extras::read_to_end;
use crate::sync::Arc;
struct MessengerNode {
fn handle_custom_message(&self, _msg: Self::CustomMessage) {}
fn read_custom_message<R: io::Read>(&self, message_type: u64, buffer: &mut R) -> Result<Option<Self::CustomMessage>, DecodeError> where Self: Sized {
if message_type == CUSTOM_MESSAGE_TYPE {
- let mut buf = Vec::new();
- buffer.read_to_end(&mut buf)?;
+ let buf = read_to_end(buffer)?;
assert_eq!(buf, CUSTOM_MESSAGE_CONTENTS);
return Ok(Some(TestCustomMessage {}))
}
let mut features = InitFeatures::empty();
features.set_onion_messages_optional();
let init_msg = msgs::Init { features, remote_network_address: None };
- nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone()).unwrap();
- nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone()).unwrap();
+ nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone(), true).unwrap();
+ nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone(), false).unwrap();
}
nodes
}
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
-/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
-/// # use lightning::ln::msgs::DecodeError;
+/// # use lightning::chain::keysinterface::KeysManager;
/// # use lightning::ln::peer_handler::IgnoringMessageHandler;
/// # use lightning::onion_message::{BlindedPath, CustomOnionMessageContents, Destination, OnionMessageContents, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # use lightning::util::ser::{Writeable, Writer};
/// # use lightning::io;
/// # use std::sync::Arc;
-/// # struct FakeLogger {};
+/// # struct FakeLogger;
/// # impl Logger for FakeLogger {
/// # fn log(&self, record: &Record) { unimplemented!() }
/// # }
/// # let your_custom_message_handler = IgnoringMessageHandler {};
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
-/// let onion_messenger = OnionMessenger::new(&keys_manager, &keys_manager, logger, your_custom_message_handler);
+/// let onion_messenger = OnionMessenger::new(&keys_manager, &keys_manager, logger, &your_custom_message_handler);
///
/// # struct YourCustomMessage {}
/// impl Writeable for YourCustomMessage {
};
}
- fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
+ fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init, _inbound: bool) -> Result<(), ()> {
if init.features.supports_onion_messages() {
let mut peers = self.pending_messages.lock().unwrap();
peers.insert(their_node_id.clone(), VecDeque::new());
Ok(())
}
- fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+ fn peer_disconnected(&self, their_node_id: &PublicKey) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
pending_msgs.remove(their_node_id);
}
// You may not use this file except in accordance with one or both of these
// licenses.
-//! The top-level network map tracking logic lives here.
+//! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers.
use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::PublicKey;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
-use bitcoin::blockdata::transaction::TxOut;
use bitcoin::hash_types::BlockHash;
-use crate::chain;
-use crate::chain::Access;
-use crate::ln::chan_utils::make_funding_redeemscript_from_slices;
+use bitcoin::network::constants::Network;
+use bitcoin::blockdata::constants::genesis_block;
+
use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
use crate::ln::msgs;
+use crate::routing::utxo::{self, UtxoLookup};
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable};
use crate::util::logger::{Logger, Level};
use crate::util::events::{MessageSendEvent, MessageSendEventsProvider};
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::Mutex;
use core::ops::{Bound, Deref};
-use bitcoin::hashes::hex::ToHex;
#[cfg(feature = "std")]
use std::time::{SystemTime, UNIX_EPOCH};
/// resync them from gossip. Each `NodeId` is mapped to the time (in seconds) it was removed so
/// that once some time passes, we can potentially resync it from gossip again.
removed_nodes: Mutex<HashMap<NodeId, Option<u64>>>,
+ /// Announcement messages which are awaiting an on-chain lookup to be processed.
+ pub(super) pending_checks: utxo::PendingChecks,
}
/// A read-only view of [`NetworkGraph`].
/// This network graph is then used for routing payments.
/// Provides interface to help with initial routing sync by
/// serving historical announcements.
-pub struct P2PGossipSync<G: Deref<Target=NetworkGraph<L>>, C: Deref, L: Deref>
-where C::Target: chain::Access, L::Target: Logger
+pub struct P2PGossipSync<G: Deref<Target=NetworkGraph<L>>, U: Deref, L: Deref>
+where U::Target: UtxoLookup, L::Target: Logger
{
network_graph: G,
- chain_access: Option<C>,
+ utxo_lookup: Option<U>,
#[cfg(feature = "std")]
full_syncs_requested: AtomicUsize,
pending_events: Mutex<Vec<MessageSendEvent>>,
logger: L,
}
-impl<G: Deref<Target=NetworkGraph<L>>, C: Deref, L: Deref> P2PGossipSync<G, C, L>
-where C::Target: chain::Access, L::Target: Logger
+impl<G: Deref<Target=NetworkGraph<L>>, U: Deref, L: Deref> P2PGossipSync<G, U, L>
+where U::Target: UtxoLookup, L::Target: Logger
{
/// Creates a new tracker of the actual state of the network of channels and nodes,
/// assuming an existing Network Graph.
- /// Chain monitor is used to make sure announced channels exist on-chain,
- /// channel data is correct, and that the announcement is signed with
- /// channel owners' keys.
- pub fn new(network_graph: G, chain_access: Option<C>, logger: L) -> Self {
+ /// UTXO lookup is used to make sure announced channels exist on-chain, channel data is
+ /// correct, and the announcement is signed with channel owners' keys.
+ pub fn new(network_graph: G, utxo_lookup: Option<U>, logger: L) -> Self {
P2PGossipSync {
network_graph,
#[cfg(feature = "std")]
full_syncs_requested: AtomicUsize::new(0),
- chain_access,
+ utxo_lookup,
pending_events: Mutex::new(vec![]),
logger,
}
/// Adds a provider used to check new announcements. Does not affect
/// existing announcements unless they are updated.
/// Add, update or remove the provider would replace the current one.
- pub fn add_chain_access(&mut self, chain_access: Option<C>) {
- self.chain_access = chain_access;
+ pub fn add_utxo_lookup(&mut self, utxo_lookup: Option<U>) {
+ self.utxo_lookup = utxo_lookup;
}
/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
false
}
}
+
+	/// Used to broadcast forward gossip messages which were validated asynchronously.
+ ///
+ /// Note that this will ignore events other than `Broadcast*` or messages with too much excess
+ /// data.
+ pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) {
+ match &mut ev {
+ MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => {
+ if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { return; }
+ if update_msg.as_ref()
+ .map(|msg| msg.contents.excess_data.len()).unwrap_or(0) > MAX_EXCESS_BYTES_FOR_RELAY
+ {
+ *update_msg = None;
+ }
+ },
+ MessageSendEvent::BroadcastChannelUpdate { msg } => {
+ if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { return; }
+ },
+ MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
+ if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY ||
+ msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY ||
+ msg.contents.excess_data.len() + msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
+ {
+ return;
+ }
+ },
+ _ => return,
+ }
+ self.pending_events.lock().unwrap().push(ev);
+ }
}
impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
}
}
-impl<G: Deref<Target=NetworkGraph<L>>, C: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync<G, C, L>
-where C::Target: chain::Access, L::Target: Logger
+impl<G: Deref<Target=NetworkGraph<L>>, U: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync<G, U, L>
+where U::Target: UtxoLookup, L::Target: Logger
{
fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
self.network_graph.update_node_from_announcement(msg)?;
}
fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
- self.network_graph.update_channel_from_announcement(msg, &self.chain_access)?;
- log_gossip!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" });
+ self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?;
Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
}
}
fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
- let channels = self.network_graph.channels.read().unwrap();
+ let mut channels = self.network_graph.channels.write().unwrap();
for (_, ref chan) in channels.range(starting_point..) {
if chan.announcement_message.is_some() {
let chan_announcement = chan.announcement_message.clone().unwrap();
}
fn get_next_node_announcement(&self, starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> {
- let nodes = self.network_graph.nodes.read().unwrap();
+ let mut nodes = self.network_graph.nodes.write().unwrap();
let iter = if let Some(node_id) = starting_point {
nodes.range((Bound::Excluded(node_id), Bound::Unbounded))
} else {
/// to request gossip messages for each channel. The sync is considered complete
/// when the final reply_scids_end message is received, though we are not
/// tracking this directly.
- fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) -> Result<(), ()> {
+ fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> {
// We will only perform a sync with peers that support gossip_queries.
if !init_msg.features.supports_gossip_queries() {
// Don't disconnect peers for not supporting gossip queries. We may wish to have
// (has at least one update). A peer may still want to know the channel
// exists even if its not yet routable.
let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
- let channels = self.network_graph.channels.read().unwrap();
+ let mut channels = self.network_graph.channels.write().unwrap();
for (_, ref chan) in channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
if let Some(chan_announcement) = &chan.announcement_message {
// Construct a new batch if last one is full
features.set_gossip_queries_optional();
features
}
+
+ fn processing_queue_high(&self) -> bool {
+ self.network_graph.pending_checks.too_many_checks_pending()
+ }
}
-impl<G: Deref<Target=NetworkGraph<L>>, C: Deref, L: Deref> MessageSendEventsProvider for P2PGossipSync<G, C, L>
+impl<G: Deref<Target=NetworkGraph<L>>, U: Deref, L: Deref> MessageSendEventsProvider for P2PGossipSync<G, U, L>
where
- C::Target: chain::Access,
+ U::Target: UtxoLookup,
L::Target: Logger,
{
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
(0, features, required),
(1, announcement_received_time, (default_value, 0)),
(2, node_one, required),
- (4, one_to_two_wrap, ignorable),
+ (4, one_to_two_wrap, upgradable_option),
(6, node_two, required),
- (8, two_to_one_wrap, ignorable),
+ (8, two_to_one_wrap, upgradable_option),
(10, capacity_sats, required),
(12, announcement_message, required),
});
///
/// While this may be smaller than the actual channel capacity, amounts greater than
/// [`Self::as_msat`] should not be routed through the channel.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EffectiveCapacity {
/// The available liquidity in the channel known from being a channel counterparty, and thus a
/// direct hop.
/// Fees for routing via a given channel or a node
#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)]
pub struct RoutingFees {
- /// Flat routing fee in satoshis
+ /// Flat routing fee in millisatoshis.
pub base_msat: u32,
/// Liquidity-based routing fee in millionths of a routed amount.
/// In other words, 10000 is 1%.
read_tlv_fields!(reader, {
(0, _lowest_inbound_channel_fees, option),
- (2, announcement_info_wrap, ignorable),
+ (2, announcement_info_wrap, upgradable_option),
(4, channels, vec_type),
});
last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp),
removed_nodes: Mutex::new(HashMap::new()),
removed_channels: Mutex::new(HashMap::new()),
+ pending_checks: utxo::PendingChecks::new(),
})
}
}
impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
/// Creates a new, empty, network graph.
- pub fn new(genesis_hash: BlockHash, logger: L) -> NetworkGraph<L> {
+ pub fn new(network: Network, logger: L) -> NetworkGraph<L> {
Self {
secp_ctx: Secp256k1::verification_only(),
- genesis_hash,
+ genesis_hash: genesis_block(network).header.block_hash(),
logger,
channels: RwLock::new(IndexedMap::new()),
nodes: RwLock::new(IndexedMap::new()),
last_rapid_gossip_sync_timestamp: Mutex::new(None),
removed_channels: Mutex::new(HashMap::new()),
removed_nodes: Mutex::new(HashMap::new()),
+ pending_checks: utxo::PendingChecks::new(),
}
}
}
fn update_node_from_announcement_intern(&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>) -> Result<(), LightningError> {
- match self.nodes.write().unwrap().get_mut(&msg.node_id) {
- None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}),
+ let mut nodes = self.nodes.write().unwrap();
+ match nodes.get_mut(&msg.node_id) {
+ None => {
+ core::mem::drop(nodes);
+ self.pending_checks.check_hold_pending_node_announcement(msg, full_msg)?;
+ Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError})
+ },
Some(node) => {
if let Some(node_info) = node.announcement_info.as_ref() {
// The timestamp field is somewhat of a misnomer - the BOLTs use it to order
/// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept
/// routing messages from a source using a protocol other than the lightning P2P protocol.
///
- /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify
+ /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify
/// the corresponding UTXO exists on chain and is correctly-formatted.
- pub fn update_channel_from_announcement<C: Deref>(
- &self, msg: &msgs::ChannelAnnouncement, chain_access: &Option<C>,
+ pub fn update_channel_from_announcement<U: Deref>(
+ &self, msg: &msgs::ChannelAnnouncement, utxo_lookup: &Option<U>,
) -> Result<(), LightningError>
where
- C::Target: chain::Access,
+ U::Target: UtxoLookup,
{
let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement");
secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement");
secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement");
secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement");
- self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), chain_access)
+ self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup)
}
/// Store or update channel info from a channel announcement without verifying the associated
/// signatures. Because we aren't given the associated signatures here we cannot relay the
/// channel announcement to any of our peers.
///
- /// If a `chain::Access` object is provided via `chain_access`, it will be called to verify
+ /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify
/// the corresponding UTXO exists on chain and is correctly-formatted.
- pub fn update_channel_from_unsigned_announcement<C: Deref>(
- &self, msg: &msgs::UnsignedChannelAnnouncement, chain_access: &Option<C>
+ pub fn update_channel_from_unsigned_announcement<U: Deref>(
+ &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>
) -> Result<(), LightningError>
where
- C::Target: chain::Access,
+ U::Target: UtxoLookup,
{
- self.update_channel_from_unsigned_announcement_intern(msg, None, chain_access)
+ self.update_channel_from_unsigned_announcement_intern(msg, None, utxo_lookup)
}
/// Update channel from partial announcement data received via rapid gossip sync
Ok(())
}
- fn update_channel_from_unsigned_announcement_intern<C: Deref>(
- &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, chain_access: &Option<C>
+ fn update_channel_from_unsigned_announcement_intern<U: Deref>(
+ &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option<U>
) -> Result<(), LightningError>
where
- C::Target: chain::Access,
+ U::Target: UtxoLookup,
{
if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 {
return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
action: ErrorAction::IgnoreDuplicateGossip
});
}
- } else if chain_access.is_none() {
+ } else if utxo_lookup.is_none() {
// Similarly, if we can't check the chain right now anyway, ignore the
// duplicate announcement without bothering to take the channels write lock.
return Err(LightningError {
}
}
- let utxo_value = match &chain_access {
- &None => {
- // Tentatively accept, potentially exposing us to DoS attacks
- None
- },
- &Some(ref chain_access) => {
- match chain_access.get_utxo(&msg.chain_hash, msg.short_channel_id) {
- Ok(TxOut { value, script_pubkey }) => {
- let expected_script =
- make_funding_redeemscript_from_slices(msg.bitcoin_key_1.as_slice(), msg.bitcoin_key_2.as_slice()).to_v0_p2wsh();
- if script_pubkey != expected_script {
- return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", expected_script.to_hex(), script_pubkey.to_hex()), action: ErrorAction::IgnoreError});
- }
- //TODO: Check if value is worth storing, use it to inform routing, and compare it
- //to the new HTLC max field in channel_update
- Some(value)
- },
- Err(chain::AccessError::UnknownChain) => {
- return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError});
- },
- Err(chain::AccessError::UnknownTx) => {
- return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError});
- },
- }
- },
- };
+ let utxo_value = self.pending_checks.check_channel_announcement(
+ utxo_lookup, msg, full_msg)?;
#[allow(unused_mut, unused_assignments)]
let mut announcement_received_time = 0;
announcement_received_time,
};
- self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)
+ self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)?;
+
+ log_gossip!(self.logger, "Added channel_announcement for {}{}", msg.short_channel_id, if !msg.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" });
+ Ok(())
}
/// Marks a channel in the graph as failed if a corresponding HTLC fail was sent.
let mut channels = self.channels.write().unwrap();
match channels.get_mut(&msg.short_channel_id) {
- None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}),
+ None => {
+ core::mem::drop(channels);
+ self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?;
+ return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError});
+ },
Some(channel) => {
if msg.htlc_maximum_msat > MAX_VALUE_MSAT {
return Err(LightningError{err:
}
#[cfg(test)]
-mod tests {
- use crate::chain;
+pub(crate) mod tests {
use crate::ln::channelmanager;
use crate::ln::chan_utils::make_funding_redeemscript;
#[cfg(feature = "std")]
use crate::ln::features::InitFeatures;
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo};
+ use crate::routing::utxo::{UtxoLookupError, UtxoResult};
use crate::ln::msgs::{RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate,
ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
use crate::sync::Arc;
fn create_network_graph() -> NetworkGraph<Arc<test_utils::TestLogger>> {
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
let logger = Arc::new(test_utils::TestLogger::new());
- NetworkGraph::new(genesis_hash, logger)
+ NetworkGraph::new(Network::Testnet, logger)
}
fn create_gossip_sync(network_graph: &NetworkGraph<Arc<test_utils::TestLogger>>) -> (
assert!(!gossip_sync.should_request_full_sync(&node_id));
}
- fn get_signed_node_announcement<F: Fn(&mut UnsignedNodeAnnouncement)>(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> NodeAnnouncement {
+ pub(crate) fn get_signed_node_announcement<F: Fn(&mut UnsignedNodeAnnouncement)>(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> NodeAnnouncement {
let node_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_key));
let mut unsigned_announcement = UnsignedNodeAnnouncement {
features: channelmanager::provided_node_features(&UserConfig::default()),
}
}
- fn get_signed_channel_announcement<F: Fn(&mut UnsignedChannelAnnouncement)>(f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelAnnouncement {
+ pub(crate) fn get_signed_channel_announcement<F: Fn(&mut UnsignedChannelAnnouncement)>(f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelAnnouncement {
let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_key);
let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_key);
let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
}
}
- fn get_channel_script(secp_ctx: &Secp256k1<secp256k1::All>) -> Script {
+ pub(crate) fn get_channel_script(secp_ctx: &Secp256k1<secp256k1::All>) -> Script {
let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap();
let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey),
&PublicKey::from_secret_key(secp_ctx, &node_2_btckey)).to_v0_p2wsh()
}
- fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelUpdate {
+ pub(crate) fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelUpdate {
let mut unsigned_channel_update = UnsignedChannelUpdate {
chain_hash: genesis_block(Network::Testnet).header.block_hash(),
short_channel_id: 0,
let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
// Test if the UTXO lookups were not supported
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
let mut gossip_sync = P2PGossipSync::new(&network_graph, None, &logger);
match gossip_sync.handle_channel_announcement(&valid_announcement) {
Ok(res) => assert!(res),
// Test if an associated transaction were not on-chain (or not confirmed).
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
- *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx);
- let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx));
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
};
// Now test if the transaction is found in the UTXO set and the script is correct.
- *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script.clone() });
+ *chain_source.utxo_ret.lock().unwrap() =
+ UtxoResult::Sync(Ok(TxOut { value: 0, script_pubkey: good_script.clone() }));
let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
unsigned_announcement.short_channel_id += 2;
}, node_1_privkey, node_2_privkey, &secp_ctx);
// If we receive announcement for the same channel, once we've validated it against the
// chain, we simply ignore all new (duplicate) announcements.
- *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script });
+ *chain_source.utxo_ret.lock().unwrap() =
+ UtxoResult::Sync(Ok(TxOut { value: 0, script_pubkey: good_script }));
match gossip_sync.handle_channel_announcement(&valid_announcement) {
Ok(_) => panic!(),
Err(e) => assert_eq!(e.err, "Already have chain-validated channel")
let secp_ctx = Secp256k1::new();
let logger = test_utils::TestLogger::new();
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
{
// Announce a channel we will update
let good_script = get_channel_script(&secp_ctx);
- *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() });
+ *chain_source.utxo_ret.lock().unwrap() =
+ UtxoResult::Sync(Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() }));
let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
short_channel_id = valid_channel_announcement.contents.short_channel_id;
#[test]
fn handling_network_update() {
let logger = test_utils::TestLogger::new();
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
let secp_ctx = Secp256k1::new();
let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
{
// Get a new network graph since we don't want to track removed nodes in this test with "std"
- let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
// Announce a channel to test permanent node failure
let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
// Test the removal of channels with `remove_stale_channels_and_tracking`.
let logger = test_utils::TestLogger::new();
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
let secp_ctx = Secp256k1::new();
// It should ignore if gossip_queries feature is not enabled
{
let init_msg = Init { features: InitFeatures::empty(), remote_network_address: None };
- gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap();
+ gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap();
let events = gossip_sync.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
}
let mut features = InitFeatures::empty();
features.set_gossip_queries_optional();
let init_msg = Init { features, remote_network_address: None };
- gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap();
+ gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap();
let events = gossip_sync.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match &events[0] {
//! Structs and impls for receiving messages about the network and storing the topology live here.
+pub mod utxo;
pub mod gossip;
pub mod router;
pub mod scoring;
// You may not use this file except in accordance with one or both of these
// licenses.
-//! The top-level routing/network map tracking logic lives here.
-//!
-//! You probably want to create a P2PGossipSync and use that as your RoutingMessageHandler and then
-//! interrogate it to get routes for your own payments.
+//! The router finds paths within a [`NetworkGraph`] for a payment.
use bitcoin::secp256k1::PublicKey;
use bitcoin::hashes::Hash;
use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId, RoutingFees};
use crate::routing::scoring::{ChannelUsage, LockableScore, Score};
-use crate::util::ser::{Writeable, Readable, Writer};
+use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer};
use crate::util::logger::{Level, Logger};
use crate::util::chacha20::ChaCha20;
&random_seed_bytes
)
}
-
- fn notify_payment_path_failed(&self, path: &[&RouteHop], short_channel_id: u64) {
- self.scorer.lock().payment_path_failed(path, short_channel_id);
- }
-
- fn notify_payment_path_successful(&self, path: &[&RouteHop]) {
- self.scorer.lock().payment_path_successful(path);
- }
-
- fn notify_payment_probe_successful(&self, path: &[&RouteHop]) {
- self.scorer.lock().probe_successful(path);
- }
-
- fn notify_payment_probe_failed(&self, path: &[&RouteHop], short_channel_id: u64) {
- self.scorer.lock().probe_failed(path, short_channel_id);
- }
}
/// A trait defining behavior for routing a payment.
) -> Result<Route, LightningError> {
self.find_route(payer, route_params, first_hops, inflight_htlcs)
}
- /// Lets the router know that payment through a specific path has failed.
- fn notify_payment_path_failed(&self, path: &[&RouteHop], short_channel_id: u64);
- /// Lets the router know that payment through a specific path was successful.
- fn notify_payment_path_successful(&self, path: &[&RouteHop]);
- /// Lets the router know that a payment probe was successful.
- fn notify_payment_probe_successful(&self, path: &[&RouteHop]);
- /// Lets the router know that a payment probe failed.
- fn notify_payment_probe_failed(&self, path: &[&RouteHop], short_channel_id: u64);
}
/// [`Score`] implementation that factors in in-flight HTLC liquidity.
fn read<R: io::Read>(reader: &mut R) -> Result<Route, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let path_count: u64 = Readable::read(reader)?;
+ if path_count == 0 { return Err(DecodeError::InvalidValue); }
let mut paths = Vec::with_capacity(cmp::min(path_count, 128) as usize);
+ let mut min_final_cltv_expiry_delta = u32::max_value();
for _ in 0..path_count {
let hop_count: u8 = Readable::read(reader)?;
- let mut hops = Vec::with_capacity(hop_count as usize);
+ let mut hops: Vec<RouteHop> = Vec::with_capacity(hop_count as usize);
for _ in 0..hop_count {
hops.push(Readable::read(reader)?);
}
+ if hops.is_empty() { return Err(DecodeError::InvalidValue); }
+ min_final_cltv_expiry_delta =
+ cmp::min(min_final_cltv_expiry_delta, hops.last().unwrap().cltv_expiry_delta);
paths.push(hops);
}
let mut payment_params = None;
read_tlv_fields!(reader, {
- (1, payment_params, option),
+ (1, payment_params, (option: ReadableArgs, min_final_cltv_expiry_delta)),
});
Ok(Route { paths, payment_params })
}
/// Parameters needed to find a [`Route`].
///
/// Passed to [`find_route`] and [`build_route_from_hops`], but also provided in
-/// [`Event::PaymentPathFailed`] for retrying a failed payment path.
+/// [`Event::PaymentPathFailed`].
///
/// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
#[derive(Clone, Debug, PartialEq, Eq)]
/// The amount in msats sent on the failed payment path.
pub final_value_msat: u64,
+}
- /// The CLTV on the final hop of the failed payment path.
- ///
- /// This field is deprecated, [`PaymentParameters::final_cltv_expiry_delta`] should be used
- /// instead, if available.
- pub final_cltv_expiry_delta: u32,
+impl Writeable for RouteParameters {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ write_tlv_fields!(writer, {
+ (0, self.payment_params, required),
+ (2, self.final_value_msat, required),
+ // LDK versions prior to 0.0.114 had the `final_cltv_expiry_delta` parameter in
+ // `RouteParameters` directly. For compatibility, we write it here.
+ (4, self.payment_params.final_cltv_expiry_delta, required),
+ });
+ Ok(())
+ }
}
-impl_writeable_tlv_based!(RouteParameters, {
- (0, payment_params, required),
- (2, final_value_msat, required),
- (4, final_cltv_expiry_delta, required),
-});
+impl Readable for RouteParameters {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ _init_and_read_tlv_fields!(reader, {
+ (0, payment_params, (required: ReadableArgs, 0)),
+ (2, final_value_msat, required),
+ (4, final_cltv_expiry_delta, required),
+ });
+ let mut payment_params: PaymentParameters = payment_params.0.unwrap();
+ if payment_params.final_cltv_expiry_delta == 0 {
+ payment_params.final_cltv_expiry_delta = final_cltv_expiry_delta.0.unwrap();
+ }
+ Ok(Self {
+ payment_params,
+ final_value_msat: final_value_msat.0.unwrap(),
+ })
+ }
+}
/// Maximum total CTLV difference we allow for a full payment path.
pub const DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA: u32 = 1008;
/// these SCIDs.
pub previously_failed_channels: Vec<u64>,
- /// The minimum CLTV delta at the end of the route.
- ///
- /// This field should always be set to `Some` and may be required in a future release.
- pub final_cltv_expiry_delta: Option<u32>,
+ /// The minimum CLTV delta at the end of the route. This value must not be zero.
+ pub final_cltv_expiry_delta: u32,
+}
+
+impl Writeable for PaymentParameters {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ write_tlv_fields!(writer, {
+ (0, self.payee_pubkey, required),
+ (1, self.max_total_cltv_expiry_delta, required),
+ (2, self.features, option),
+ (3, self.max_path_count, required),
+ (4, self.route_hints, vec_type),
+ (5, self.max_channel_saturation_power_of_half, required),
+ (6, self.expiry_time, option),
+ (7, self.previously_failed_channels, vec_type),
+ (9, self.final_cltv_expiry_delta, required),
+ });
+ Ok(())
+ }
+}
+
+impl ReadableArgs<u32> for PaymentParameters {
+ fn read<R: io::Read>(reader: &mut R, default_final_cltv_expiry_delta: u32) -> Result<Self, DecodeError> {
+ _init_and_read_tlv_fields!(reader, {
+ (0, payee_pubkey, required),
+ (1, max_total_cltv_expiry_delta, (default_value, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA)),
+ (2, features, option),
+ (3, max_path_count, (default_value, DEFAULT_MAX_PATH_COUNT)),
+ (4, route_hints, vec_type),
+ (5, max_channel_saturation_power_of_half, (default_value, 2)),
+ (6, expiry_time, option),
+ (7, previously_failed_channels, vec_type),
+ (9, final_cltv_expiry_delta, (default_value, default_final_cltv_expiry_delta)),
+ });
+ Ok(Self {
+ payee_pubkey: _init_tlv_based_struct_field!(payee_pubkey, required),
+ max_total_cltv_expiry_delta: _init_tlv_based_struct_field!(max_total_cltv_expiry_delta, (default_value, unused)),
+ features,
+ max_path_count: _init_tlv_based_struct_field!(max_path_count, (default_value, unused)),
+ route_hints: route_hints.unwrap_or(Vec::new()),
+ max_channel_saturation_power_of_half: _init_tlv_based_struct_field!(max_channel_saturation_power_of_half, (default_value, unused)),
+ expiry_time,
+ previously_failed_channels: previously_failed_channels.unwrap_or(Vec::new()),
+ final_cltv_expiry_delta: _init_tlv_based_struct_field!(final_cltv_expiry_delta, (default_value, unused)),
+ })
+ }
}
-impl_writeable_tlv_based!(PaymentParameters, {
- (0, payee_pubkey, required),
- (1, max_total_cltv_expiry_delta, (default_value, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA)),
- (2, features, option),
- (3, max_path_count, (default_value, DEFAULT_MAX_PATH_COUNT)),
- (4, route_hints, vec_type),
- (5, max_channel_saturation_power_of_half, (default_value, 2)),
- (6, expiry_time, option),
- (7, previously_failed_channels, vec_type),
- (9, final_cltv_expiry_delta, option),
-});
impl PaymentParameters {
/// Creates a payee with the node id of the given `pubkey`.
max_path_count: DEFAULT_MAX_PATH_COUNT,
max_channel_saturation_power_of_half: 2,
previously_failed_channels: Vec::new(),
- final_cltv_expiry_delta: Some(final_cltv_expiry_delta),
+ final_cltv_expiry_delta,
}
}
) -> Result<Route, LightningError>
where L::Target: Logger, GL::Target: Logger {
let graph_lock = network_graph.read_only();
- let final_cltv_expiry_delta =
- if let Some(delta) = route_params.payment_params.final_cltv_expiry_delta { delta }
- else { route_params.final_cltv_expiry_delta };
+ let final_cltv_expiry_delta = route_params.payment_params.final_cltv_expiry_delta;
let mut route = get_route(our_node_pubkey, &route_params.payment_params, &graph_lock, first_hops,
route_params.final_value_msat, final_cltv_expiry_delta, logger, scorer,
random_seed_bytes)?;
if payment_params.max_total_cltv_expiry_delta <= final_cltv_expiry_delta {
return Err(LightningError{err: "Can't find a route where the maximum total CLTV expiry delta is below the final CLTV expiry.".to_owned(), action: ErrorAction::IgnoreError});
}
- if let Some(delta) = payment_params.final_cltv_expiry_delta {
- debug_assert_eq!(delta, final_cltv_expiry_delta);
- }
+
+ // TODO: Remove the explicit final_cltv_expiry_delta parameter
+ debug_assert_eq!(final_cltv_expiry_delta, payment_params.final_cltv_expiry_delta);
// The general routing idea is the following:
// 1. Fill first/last hops communicated by the caller.
let graph_lock = network_graph.read_only();
let mut route = build_route_from_hops_internal(
our_node_pubkey, hops, &route_params.payment_params, &graph_lock,
- route_params.final_value_msat, route_params.final_cltv_expiry_delta, logger, random_seed_bytes)?;
+ route_params.final_value_msat, route_params.payment_params.final_cltv_expiry_delta,
+ logger, random_seed_bytes)?;
add_random_cltv_offset(&mut route, &route_params.payment_params, &graph_lock, random_seed_bytes);
Ok(route)
}
#[cfg(test)]
mod tests {
use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, EffectiveCapacity};
+ use crate::routing::utxo::UtxoResult;
use crate::routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE};
- use crate::routing::scoring::{ChannelUsage, Score, ProbabilisticScorer, ProbabilisticScoringParameters};
+ use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, Score, ProbabilisticScorer, ProbabilisticScoringParameters};
use crate::routing::test_utils::{add_channel, add_or_update_node, build_graph, build_line_graph, id_to_feature_flags, get_nodes, update_channel};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::EntropySource;
inbound_htlc_minimum_msat: None,
inbound_htlc_maximum_msat: None,
config: None,
+ feerate_sat_per_1000_weight: None
}
}
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
let config = UserConfig::default();
let payment_params = PaymentParameters::from_node_id(nodes[2], 42).with_features(channelmanager::provided_invoice_features(&config));
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
fn our_chans_test() {
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
fn partial_route_hint_test() {
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(empty_last_hop(&nodes));
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
let last_hops = multi_hop_last_hops_hint([nodes[2], nodes[3]]);
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops.clone());
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
// Test through channels 2, 3, 0xff00, 0xff01.
let last_hops = multi_hop_last_hops_hint([nodes[2], non_announced_pubkey]);
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops.clone());
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
// Test through channels 2, 3, 0xff00, 0xff01.
// Test shows that multiple hop hints are considered.
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops_with_public_channel(&nodes));
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
// This test shows that public routes can be present in the invoice
fn our_chans_last_hop_connect_test() {
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
}]);
let payment_params = PaymentParameters::from_node_id(target_node_id, 42).with_route_hints(vec![last_hops]);
let our_chans = vec![get_channel_details(Some(42), middle_node_id, InitFeatures::from_le_bytes(vec![0b11]), outbound_capacity_msat)];
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
let logger = ln_test_utils::TestLogger::new();
- let network_graph = NetworkGraph::new(genesis_hash, &logger);
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
let route = get_route(&source_node_id, &payment_params, &network_graph.read_only(),
Some(&our_chans.iter().collect::<Vec<_>>()), route_val, 42, &logger, &scorer, &random_seed_bytes);
route
let (secp_ctx, network_graph, mut gossip_sync, chain_monitor, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
.push_opcode(opcodes::all::OP_PUSHNUM_2)
.push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
- *chain_monitor.utxo_ret.lock().unwrap() = Ok(TxOut { value: 15, script_pubkey: good_script.clone() });
- gossip_sync.add_chain_access(Some(chain_monitor));
+ *chain_monitor.utxo_ret.lock().unwrap() =
+ UtxoResult::Sync(Ok(TxOut { value: 15, script_pubkey: good_script.clone() }));
+ gossip_sync.add_utxo_lookup(Some(chain_monitor));
add_channel(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333);
update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
// one of the latter hops is limited.
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
fn ignore_fee_first_hop_test() {
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
fn simple_mpp_route_test() {
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
fn long_mpp_route_test() {
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
fn mpp_cheaper_route_test() {
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
// if the fee is not properly accounted for, the behavior is different.
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
// This bug appeared in production in some specific channel configurations.
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
// path finding we realize that we found more capacity than we need.
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
// payment) htlc_minimum_msat. In the original algorithm, this resulted in node4's
// "previous hop" being set to node 3, creating a loop in the path.
let secp_ctx = Secp256k1::new();
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
let logger = Arc::new(ln_test_utils::TestLogger::new());
- let network = Arc::new(NetworkGraph::new(genesis_hash, Arc::clone(&logger)));
+ let network = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
let gossip_sync = P2PGossipSync::new(Arc::clone(&network), None, Arc::clone(&logger));
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let payment_params = PaymentParameters::from_node_id(nodes[6], 42);
// we calculated fees on a higher value, resulting in us ignoring such paths.
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, _, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
// resulting in us thinking there is no possible path, even if other paths exist.
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let config = UserConfig::default();
// route over multiple channels with the same first hop.
let secp_ctx = Secp256k1::new();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
let logger = Arc::new(ln_test_utils::TestLogger::new());
- let network_graph = NetworkGraph::new(genesis_hash, Arc::clone(&logger));
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let network_graph = NetworkGraph::new(Network::Testnet, Arc::clone(&logger));
+ let scorer = ln_test_utils::TestScorer::new();
let config = UserConfig::default();
let payment_params = PaymentParameters::from_node_id(nodes[0], 42).with_features(channelmanager::provided_invoice_features(&config));
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops(&nodes));
// Without penalizing each hop 100 msats, a longer path with lower fees is chosen.
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let route = get_route(
// Applying a 100 msat penalty to each hop results in taking channels 7 and 10 to nodes[6]
// from nodes[2] rather than channel 6, 11, and 8, even though the longer path is cheaper.
- let scorer = ln_test_utils::TestScorer::with_penalty(100);
+ let scorer = FixedPenaltyScorer::with_penalty(100);
let route = get_route(
&our_id, &payment_params, &network_graph.read_only(), None, 100, 42,
Arc::clone(&logger), &scorer, &random_seed_bytes
let network_graph = network.read_only();
// A path to nodes[6] exists when no penalties are applied to any channel.
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let route = get_route(
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
let network_graph = network.read_only();
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
// Make sure that generally there is at least one route available
let feasible_max_total_cltv_delta = 1008;
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
let network_graph = network.read_only();
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let mut payment_params = PaymentParameters::from_node_id(nodes[6], 0).with_route_hints(last_hops(&nodes))
.with_max_path_count(1);
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
let network_graph = network.read_only();
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let (secp_ctx, network_graph, _, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops(&nodes));
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let network_graph = network.read_only();
let network_nodes = network_graph.nodes();
let network_channels = network_graph.channels();
- let scorer = ln_test_utils::TestScorer::with_penalty(0);
+ let scorer = ln_test_utils::TestScorer::new();
let payment_params = PaymentParameters::from_node_id(nodes[3], 0);
let keys_manager = ln_test_utils::TestKeysInterface::new(&[4u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
inbound_htlc_minimum_msat: None,
inbound_htlc_maximum_msat: None,
config: None,
+ feerate_sat_per_1000_weight: None,
}
}
}
/// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ChannelUsage {
/// The amount to send through the channel, denominated in millisatoshis.
pub amount_msat: u64,
impl HistoricalBucketRangeTracker {
fn new() -> Self { Self { buckets: [0; 8] } }
- fn track_datapoint(&mut self, bucket_idx: u8) {
+ fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
// We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
//
//
// The constants were picked experimentally, selecting a decay amount that restricts us
// from overflowing buckets without having to cap them manually.
+
+ // Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
+ // the channel's capacity, though the second should generally never happen.
+ debug_assert!(liquidity_offset_msat <= capacity_msat);
+ let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
+ .try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
debug_assert!(bucket_idx < 8);
if bucket_idx < 8 {
for e in self.buckets.iter_mut() {
max_liquidity_offset_msat: L,
min_liquidity_offset_history: BRT,
max_liquidity_offset_history: BRT,
+ inflight_htlc_msat: u64,
capacity_msat: u64,
last_updated: U,
now: T,
let log_direction = |source, target| {
if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, &self.params);
+ let dir_liq = liq.as_directed(source, target, 0, amt, &self.params);
let buckets = HistoricalMinMaxBuckets {
min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, &self.params);
+ let dir_liq = liq.as_directed(source, target, 0, amt, &self.params);
return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()));
}
}
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, &self.params);
+ let dir_liq = liq.as_directed(source, target, 0, amt, &self.params);
let buckets = HistoricalMinMaxBuckets {
min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
/// Returns a view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
fn as_directed<'a>(
- &self, source: &NodeId, target: &NodeId, capacity_msat: u64, params: &'a ProbabilisticScoringParameters
+ &self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64,
+ params: &'a ProbabilisticScoringParameters
) -> DirectedChannelLiquidity<'a, &u64, &HistoricalBucketRangeTracker, T, &T> {
let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
if source < target {
max_liquidity_offset_msat,
min_liquidity_offset_history,
max_liquidity_offset_history,
+ inflight_htlc_msat,
capacity_msat,
last_updated: &self.last_updated,
now: T::now(),
/// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
fn as_directed_mut<'a>(
- &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64, params: &'a ProbabilisticScoringParameters
+ &mut self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64,
+ params: &'a ProbabilisticScoringParameters
) -> DirectedChannelLiquidity<'a, &mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
if source < target {
max_liquidity_offset_msat,
min_liquidity_offset_history,
max_liquidity_offset_history,
+ inflight_htlc_msat,
capacity_msat,
last_updated: &mut self.last_updated,
now: T::now(),
if params.historical_liquidity_penalty_multiplier_msat != 0 ||
params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
- let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat;
+ let payment_amt_64th_bucket = if amount_msat < u64::max_value() / 64 {
+ amount_msat * 64 / self.capacity_msat.saturating_add(1)
+ } else {
+ // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
+ // division. This branch should only be hit in fuzz testing since the amount would
+ // need to be over 2.88 million BTC in practice.
+ ((amount_msat as u128) * 64 / (self.capacity_msat as u128).saturating_add(1))
+ .try_into().unwrap_or(65)
+ };
+ #[cfg(not(fuzzing))]
debug_assert!(payment_amt_64th_bucket <= 64);
if payment_amt_64th_bucket > 64 { return res; }
// If we don't have any valid points (or, once decayed, we have less than a full
// point), redo the non-historical calculation with no liquidity bounds tracked and
// the historical penalty multipliers.
- let max_capacity = self.capacity_msat.saturating_sub(amount_msat).saturating_add(1);
+ let available_capacity = self.available_capacity();
+ let numerator = available_capacity.saturating_sub(amount_msat).saturating_add(1);
+ let denominator = available_capacity.saturating_add(1);
let negative_log10_times_2048 =
- approx::negative_log10_times_2048(max_capacity, self.capacity_msat.saturating_add(1));
+ approx::negative_log10_times_2048(numerator, denominator);
res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
params.historical_liquidity_penalty_multiplier_msat,
params.historical_liquidity_penalty_amount_multiplier_msat));
- return res;
}
}
/// Returns the upper bound of the channel liquidity balance in this direction.
fn max_liquidity_msat(&self) -> u64 {
- self.capacity_msat
- .checked_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat))
- .unwrap_or(0)
+ self.available_capacity()
+ .saturating_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat))
+ }
+
+ /// Returns the capacity minus the in-flight HTLCs in this direction.
+ fn available_capacity(&self) -> u64 {
+ self.capacity_msat.saturating_sub(self.inflight_htlc_msat)
}
fn decayed_offset_msat(&self, offset_msat: u64) -> u64 {
log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
chan_descr, existing_max_msat, amount_msat);
}
+ self.update_history_buckets();
}
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
chan_descr, existing_min_msat, amount_msat);
}
+ self.update_history_buckets();
}
/// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
self.set_max_liquidity_msat(max_liquidity_msat);
+ self.update_history_buckets();
}
fn update_history_buckets(&mut self) {
self.min_liquidity_offset_history.time_decay_data(half_lives);
self.max_liquidity_offset_history.time_decay_data(half_lives);
- debug_assert!(*self.min_liquidity_offset_msat <= self.capacity_msat);
+ let min_liquidity_offset_msat = self.decayed_offset_msat(*self.min_liquidity_offset_msat);
self.min_liquidity_offset_history.track_datapoint(
- // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
- // is zero or the channel's capacity, though the second should generally never happen.
- (self.min_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat)
- .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored
- debug_assert!(*self.max_liquidity_offset_msat <= self.capacity_msat);
+ min_liquidity_offset_msat, self.capacity_msat
+ );
+ let max_liquidity_offset_msat = self.decayed_offset_msat(*self.max_liquidity_offset_msat);
self.max_liquidity_offset_history.track_datapoint(
- // Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
- // is zero or the channel's capacity, though the second should generally never happen.
- (self.max_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat)
- .try_into().unwrap_or(32)); // 32 is bogus for 8 buckets, and will be ignored
+ max_liquidity_offset_msat, self.capacity_msat
+ );
}
/// Adjusts the lower bound of the channel liquidity balance in this direction.
} else {
self.decayed_offset_msat(*self.max_liquidity_offset_msat)
};
- self.update_history_buckets();
*self.last_updated = self.now;
}
} else {
self.decayed_offset_msat(*self.min_liquidity_offset_msat)
};
- self.update_history_buckets();
*self.last_updated = self.now;
}
}
}
let amount_msat = usage.amount_msat;
- let capacity_msat = usage.effective_capacity.as_msat()
- .saturating_sub(usage.inflight_htlc_msat);
+ let capacity_msat = usage.effective_capacity.as_msat();
+ let inflight_htlc_msat = usage.inflight_htlc_msat;
self.channel_liquidities
.get(&short_channel_id)
.unwrap_or(&ChannelLiquidity::new())
- .as_directed(source, target, capacity_msat, &self.params)
+ .as_directed(source, target, inflight_htlc_msat, capacity_msat, &self.params)
.penalty_msat(amount_msat, &self.params)
.saturating_add(anti_probing_penalty_msat)
.saturating_add(base_penalty_msat)
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, &self.params)
+ .as_directed_mut(source, &target, 0, capacity_msat, &self.params)
.failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, &self.params)
+ .as_directed_mut(source, &target, 0, capacity_msat, &self.params)
.failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
}
} else {
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, &self.params)
+ .as_directed_mut(source, &target, 0, capacity_msat, &self.params)
.successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
}
fn network_graph(logger: &TestLogger) -> NetworkGraph<&TestLogger> {
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let mut network_graph = NetworkGraph::new(genesis_hash, logger);
+ let mut network_graph = NetworkGraph::new(Network::Testnet, logger);
add_channel(&mut network_graph, 42, source_privkey(), target_privkey());
add_channel(&mut network_graph, 43, target_privkey(), recipient_privkey());
let chain_source: Option<&crate::util::test_utils::TestChainSource> = None;
network_graph.update_channel_from_announcement(
&signed_announcement, &chain_source).unwrap();
- update_channel(network_graph, short_channel_id, node_1_key, 0);
- update_channel(network_graph, short_channel_id, node_2_key, 1);
+ update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000);
+ update_channel(network_graph, short_channel_id, node_2_key, 1, 0);
}
fn update_channel(
network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey,
- flags: u8
+ flags: u8, htlc_maximum_msat: u64
) {
let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
let secp_ctx = Secp256k1::new();
flags,
cltv_expiry_delta: 18,
htlc_minimum_msat: 0,
- htlc_maximum_msat: 1_000,
+ htlc_maximum_msat,
fee_base_msat: 1,
fee_proportional_millionths: 0,
excess_data: Vec::new(),
// Update minimum liquidity.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, &scorer.params)
+ .as_directed_mut(&source, &target, 0, 1_000, &scorer.params)
.set_min_liquidity_msat(200);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 800);
// Update maximum liquidity.
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 1_000, &scorer.params);
+ .as_directed(&target, &recipient, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 1_000, &scorer.params);
+ .as_directed(&recipient, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
scorer.channel_liquidities.get_mut(&43).unwrap()
- .as_directed_mut(&target, &recipient, 1_000, &scorer.params)
+ .as_directed_mut(&target, &recipient, 0, 1_000, &scorer.params)
.set_max_liquidity_msat(200);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 1_000, &scorer.params);
+ .as_directed(&target, &recipient, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 200);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 1_000, &scorer.params);
+ .as_directed(&recipient, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 800);
assert_eq!(liquidity.max_liquidity_msat(), 1000);
}
// Check initial bounds.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, &scorer.params)
+ .as_directed_mut(&source, &target, 0, 1_000, &scorer.params)
.set_min_liquidity_msat(900);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 900);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 100);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 1_000, &scorer.params)
+ .as_directed_mut(&target, &source, 0, 1_000, &scorer.params)
.set_min_liquidity_msat(400);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
}
// Check initial bounds.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, &scorer.params)
+ .as_directed_mut(&source, &target, 0, 1_000, &scorer.params)
.set_max_liquidity_msat(300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 1_000, &scorer.params)
+ .as_directed_mut(&target, &source, 0, 1_000, &scorer.params)
.set_max_liquidity_msat(600);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, &scorer.params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
}
// we do not score such channels.
let secp_ctx = Secp256k1::new();
let logger = TestLogger::new();
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let mut network_graph = NetworkGraph::new(genesis_hash, &logger);
+ let mut network_graph = NetworkGraph::new(Network::Testnet, &logger);
let secret_a = SecretKey::from_slice(&[42; 32]).unwrap();
let secret_b = SecretKey::from_slice(&[43; 32]).unwrap();
let secret_c = SecretKey::from_slice(&[44; 32]).unwrap();
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
let params = ProbabilisticScoringParameters {
+ liquidity_offset_half_life: Duration::from_secs(60 * 60),
historical_liquidity_penalty_multiplier_msat: 1024,
historical_liquidity_penalty_amount_multiplier_msat: 1024,
historical_no_updates_half_life: Duration::from_secs(10),
// data entirely instead.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
Some(([0; 8], [0; 8])));
+
+ let usage = ChannelUsage {
+ amount_msat: 100,
+ inflight_htlc_msat: 1024,
+ effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
+ };
+ scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::<Vec<_>>(), 42);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 409);
+
+ let usage = ChannelUsage {
+ amount_msat: 1,
+ inflight_htlc_msat: 0,
+ effective_capacity: EffectiveCapacity::MaximumHTLC { amount_msat: 0 },
+ };
+ assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage), 2048);
+
+ // Advance to decay all liquidity offsets to zero.
+ SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
+
+ // Use a path in the opposite direction, which has zero for htlc_maximum_msat. This will
+ // ensure that the effective capacity is zero to test division-by-zero edge cases.
+ let path = vec![
+ path_hop(target_pubkey(), 43, 2),
+ path_hop(source_pubkey(), 42, 1),
+ path_hop(sender_pubkey(), 41, 0),
+ ];
+ scorer.payment_path_failed(&path.iter().collect::<Vec<_>>(), 42);
}
#[test]
let secp_ctx = Secp256k1::new();
let logger = Arc::new(test_utils::TestLogger::new());
let chain_monitor = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = Arc::new(NetworkGraph::new(genesis_hash, Arc::clone(&logger)));
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger));
// Build network from our_id to node 19:
let secp_ctx = Secp256k1::new();
let logger = Arc::new(test_utils::TestLogger::new());
let chain_monitor = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
- let genesis_hash = genesis_block(Network::Testnet).header.block_hash();
- let network_graph = Arc::new(NetworkGraph::new(genesis_hash, Arc::clone(&logger)));
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger));
// Build network from our_id to node6:
//
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! This module contains traits for LDK to access UTXOs to check gossip data is correct.
+//!
+//! When lightning nodes gossip channel information, they resist DoS attacks by checking that each
+//! channel matches a UTXO on-chain, requiring at least some marginal on-chain transacting in
+//! order to announce a channel. This module handles that checking.
+
+use bitcoin::{BlockHash, TxOut};
+use bitcoin::hashes::hex::ToHex;
+
+use crate::ln::chan_utils::make_funding_redeemscript_from_slices;
+use crate::ln::msgs::{self, LightningError, ErrorAction};
+use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
+use crate::util::events::MessageSendEvent;
+use crate::util::logger::{Level, Logger};
+use crate::util::ser::Writeable;
+
+use crate::prelude::*;
+
+use alloc::sync::{Arc, Weak};
+use crate::sync::{Mutex, LockTestExt};
+use core::ops::Deref;
+
+/// An error when accessing the chain via [`UtxoLookup`].
+#[derive(Clone, Debug)]
+pub enum UtxoLookupError {
+ /// The requested chain is unknown.
+ UnknownChain,
+
+ /// The requested transaction doesn't exist or hasn't confirmed.
+ UnknownTx,
+}
+
+/// The result of a [`UtxoLookup::get_utxo`] call. A call may resolve either synchronously,
+/// returning the `Sync` variant, or asynchronously, returning an [`UtxoFuture`] in the `Async`
+/// variant.
+#[derive(Clone)]
+pub enum UtxoResult {
+ /// A result which was resolved synchronously. It either includes a [`TxOut`] for the output
+ /// requested or a [`UtxoLookupError`].
+ Sync(Result<TxOut, UtxoLookupError>),
+ /// A result which will be resolved asynchronously. It includes a [`UtxoFuture`], a `clone` of
+ /// which you must keep locally and call [`UtxoFuture::resolve`] on once the lookup completes.
+ ///
+ /// Note that in order to avoid runaway memory usage, the number of parallel checks is limited,
+ /// but only fairly loosely. Because pending checks block all message processing, leaving
+ /// checks pending for an extended time may cause DoS of other functions. It is recommended you
+ /// keep a tight timeout on lookups, on the order of a few seconds.
+ Async(UtxoFuture),
+}
+
+/// The `UtxoLookup` trait defines behavior for accessing on-chain UTXOs.
+pub trait UtxoLookup {
+ /// Returns the transaction output of a funding transaction encoded by [`short_channel_id`].
+ /// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
+ /// is unknown.
+ ///
+ /// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id
+ fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> UtxoResult;
+}
+
+enum ChannelAnnouncement {
+ Full(msgs::ChannelAnnouncement),
+ Unsigned(msgs::UnsignedChannelAnnouncement),
+}
+impl ChannelAnnouncement {
+ fn node_id_1(&self) -> &NodeId {
+ match self {
+ ChannelAnnouncement::Full(msg) => &msg.contents.node_id_1,
+ ChannelAnnouncement::Unsigned(msg) => &msg.node_id_1,
+ }
+ }
+}
+
+enum NodeAnnouncement {
+ Full(msgs::NodeAnnouncement),
+ Unsigned(msgs::UnsignedNodeAnnouncement),
+}
+impl NodeAnnouncement {
+ fn timestamp(&self) -> u32 {
+ match self {
+ NodeAnnouncement::Full(msg) => msg.contents.timestamp,
+ NodeAnnouncement::Unsigned(msg) => msg.timestamp,
+ }
+ }
+}
+
+enum ChannelUpdate {
+ Full(msgs::ChannelUpdate),
+ Unsigned(msgs::UnsignedChannelUpdate),
+}
+impl ChannelUpdate {
+ fn timestamp(&self) -> u32 {
+ match self {
+ ChannelUpdate::Full(msg) => msg.contents.timestamp,
+ ChannelUpdate::Unsigned(msg) => msg.timestamp,
+ }
+ }
+}
+
+struct UtxoMessages {
+ complete: Option<Result<TxOut, UtxoLookupError>>,
+ channel_announce: Option<ChannelAnnouncement>,
+ latest_node_announce_a: Option<NodeAnnouncement>,
+ latest_node_announce_b: Option<NodeAnnouncement>,
+ latest_channel_update_a: Option<ChannelUpdate>,
+ latest_channel_update_b: Option<ChannelUpdate>,
+}
+
+/// Represents a future resolution of a [`UtxoLookup::get_utxo`] query resolving async.
+///
+/// See [`UtxoResult::Async`] and [`UtxoFuture::resolve`] for more info.
+#[derive(Clone)]
+pub struct UtxoFuture {
+ state: Arc<Mutex<UtxoMessages>>,
+}
+
+/// A trivial implementation of [`UtxoLookup`] which is used to call back into the network graph
+/// once we have a concrete resolution of a request.
+struct UtxoResolver(Result<TxOut, UtxoLookupError>);
+impl UtxoLookup for UtxoResolver {
+ fn get_utxo(&self, _genesis_hash: &BlockHash, _short_channel_id: u64) -> UtxoResult {
+ UtxoResult::Sync(self.0.clone())
+ }
+}
+
+impl UtxoFuture {
+ /// Builds a new future for later resolution.
+ pub fn new() -> Self {
+ Self { state: Arc::new(Mutex::new(UtxoMessages {
+ complete: None,
+ channel_announce: None,
+ latest_node_announce_a: None,
+ latest_node_announce_b: None,
+ latest_channel_update_a: None,
+ latest_channel_update_b: None,
+ }))}
+ }
+
+ /// Resolves this future against the given `graph` and with the given `result`.
+ ///
+ /// This is identical to calling [`UtxoFuture::resolve`] with a dummy `gossip`, disabling
+ /// forwarding the validated gossip message onwards to peers.
+ ///
+ /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+ /// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+ /// after this.
+ ///
+ /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+ /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+ pub fn resolve_without_forwarding<L: Deref>(&self,
+ graph: &NetworkGraph<L>, result: Result<TxOut, UtxoLookupError>)
+ where L::Target: Logger {
+ self.do_resolve(graph, result);
+ }
+
+ /// Resolves this future against the given `graph` and with the given `result`.
+ ///
+ /// The given `gossip` is used to broadcast any validated messages onwards to all peers which
+ /// have available buffer space.
+ ///
+ /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+ /// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+ /// after this.
+ ///
+ /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+ /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+ pub fn resolve<L: Deref, G: Deref<Target=NetworkGraph<L>>, U: Deref, GS: Deref<Target = P2PGossipSync<G, U, L>>>(&self,
+ graph: &NetworkGraph<L>, gossip: GS, result: Result<TxOut, UtxoLookupError>
+ ) where L::Target: Logger, U::Target: UtxoLookup {
+ let mut res = self.do_resolve(graph, result);
+ for msg_opt in res.iter_mut() {
+ if let Some(msg) = msg_opt.take() {
+ gossip.forward_gossip_msg(msg);
+ }
+ }
+ }
+
+ fn do_resolve<L: Deref>(&self, graph: &NetworkGraph<L>, result: Result<TxOut, UtxoLookupError>)
+ -> [Option<MessageSendEvent>; 5] where L::Target: Logger {
+ let (announcement, node_a, node_b, update_a, update_b) = {
+ let mut pending_checks = graph.pending_checks.internal.lock().unwrap();
+ let mut async_messages = self.state.lock().unwrap();
+
+ if async_messages.channel_announce.is_none() {
+ // We raced returning to `check_channel_announcement` which hasn't updated
+ // `channel_announce` yet. That's okay, we can set the `complete` field which it will
+ // check once it gets control again.
+ async_messages.complete = Some(result);
+ return [None, None, None, None, None];
+ }
+
+ let announcement_msg = match async_messages.channel_announce.as_ref().unwrap() {
+ ChannelAnnouncement::Full(signed_msg) => &signed_msg.contents,
+ ChannelAnnouncement::Unsigned(msg) => &msg,
+ };
+
+ pending_checks.lookup_completed(announcement_msg, &Arc::downgrade(&self.state));
+
+ (async_messages.channel_announce.take().unwrap(),
+ async_messages.latest_node_announce_a.take(),
+ async_messages.latest_node_announce_b.take(),
+ async_messages.latest_channel_update_a.take(),
+ async_messages.latest_channel_update_b.take())
+ };
+
+ let mut res = [None, None, None, None, None];
+ let mut res_idx = 0;
+
+ // Now that we've updated our internal state, pass the pending messages back through the
+ // network graph with a different `UtxoLookup` which will resolve immediately.
+ // Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do
+ // with them.
+ let resolver = UtxoResolver(result);
+ match announcement {
+ ChannelAnnouncement::Full(signed_msg) => {
+ if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() {
+ res[res_idx] = Some(MessageSendEvent::BroadcastChannelAnnouncement {
+ msg: signed_msg, update_msg: None,
+ });
+ res_idx += 1;
+ }
+ },
+ ChannelAnnouncement::Unsigned(msg) => {
+ let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver));
+ },
+ }
+
+ for announce in core::iter::once(node_a).chain(core::iter::once(node_b)) {
+ match announce {
+ Some(NodeAnnouncement::Full(signed_msg)) => {
+ if graph.update_node_from_announcement(&signed_msg).is_ok() {
+ res[res_idx] = Some(MessageSendEvent::BroadcastNodeAnnouncement {
+ msg: signed_msg,
+ });
+ res_idx += 1;
+ }
+ },
+ Some(NodeAnnouncement::Unsigned(msg)) => {
+ let _ = graph.update_node_from_unsigned_announcement(&msg);
+ },
+ None => {},
+ }
+ }
+
+ for update in core::iter::once(update_a).chain(core::iter::once(update_b)) {
+ match update {
+ Some(ChannelUpdate::Full(signed_msg)) => {
+ if graph.update_channel(&signed_msg).is_ok() {
+ res[res_idx] = Some(MessageSendEvent::BroadcastChannelUpdate {
+ msg: signed_msg,
+ });
+ res_idx += 1;
+ }
+ },
+ Some(ChannelUpdate::Unsigned(msg)) => {
+ let _ = graph.update_channel_unsigned(&msg);
+ },
+ None => {},
+ }
+ }
+
+ res
+ }
+}
+
+struct PendingChecksContext {
+ channels: HashMap<u64, Weak<Mutex<UtxoMessages>>>,
+ nodes: HashMap<NodeId, Vec<Weak<Mutex<UtxoMessages>>>>,
+}
+
+impl PendingChecksContext {
+ fn lookup_completed(&mut self,
+ msg: &msgs::UnsignedChannelAnnouncement, completed_state: &Weak<Mutex<UtxoMessages>>
+ ) {
+ if let hash_map::Entry::Occupied(e) = self.channels.entry(msg.short_channel_id) {
+ if Weak::ptr_eq(e.get(), &completed_state) {
+ e.remove();
+ }
+ }
+
+ if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_1) {
+ e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state));
+ if e.get().is_empty() { e.remove(); }
+ }
+ if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_2) {
+ e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state));
+ if e.get().is_empty() { e.remove(); }
+ }
+ }
+}
+
+/// A set of messages which are pending UTXO lookups for processing.
+pub(super) struct PendingChecks {
+ internal: Mutex<PendingChecksContext>,
+}
+
+impl PendingChecks {
+ pub(super) fn new() -> Self {
+ PendingChecks { internal: Mutex::new(PendingChecksContext {
+ channels: HashMap::new(), nodes: HashMap::new(),
+ }) }
+ }
+
+ /// Checks if there is a pending `channel_update` UTXO validation for the given channel,
+ /// and, if so, stores the channel message for handling later and returns an `Err`.
+ pub(super) fn check_hold_pending_channel_update(
+ &self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>
+ ) -> Result<(), LightningError> {
+ let mut pending_checks = self.internal.lock().unwrap();
+ if let hash_map::Entry::Occupied(e) = pending_checks.channels.entry(msg.short_channel_id) {
+ let is_from_a = (msg.flags & 1) == 1;
+ match Weak::upgrade(e.get()) {
+ Some(msgs_ref) => {
+ let mut messages = msgs_ref.lock().unwrap();
+ let latest_update = if is_from_a {
+ &mut messages.latest_channel_update_a
+ } else {
+ &mut messages.latest_channel_update_b
+ };
+ if latest_update.is_none() || latest_update.as_ref().unwrap().timestamp() < msg.timestamp {
+ // If the messages we got has a higher timestamp, just blindly assume the
+ // signatures on the new message are correct and drop the old message. This
+ // may cause us to end up dropping valid `channel_update`s if a peer is
+ // malicious, but we should get the correct ones when the node updates them.
+ *latest_update = Some(
+ if let Some(msg) = full_msg { ChannelUpdate::Full(msg.clone()) }
+ else { ChannelUpdate::Unsigned(msg.clone()) });
+ }
+ return Err(LightningError {
+ err: "Awaiting channel_announcement validation to accept channel_update".to_owned(),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip),
+ });
+ },
+ None => { e.remove(); },
+ }
+ }
+ Ok(())
+ }
+
+ /// Checks if there is a pending `node_announcement` UTXO validation for a channel with the
+ /// given node and, if so, stores the channel message for handling later and returns an `Err`.
+ pub(super) fn check_hold_pending_node_announcement(
+ &self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>
+ ) -> Result<(), LightningError> {
+ let mut pending_checks = self.internal.lock().unwrap();
+ if let hash_map::Entry::Occupied(mut e) = pending_checks.nodes.entry(msg.node_id) {
+ let mut found_at_least_one_chan = false;
+ e.get_mut().retain(|node_msgs| {
+ match Weak::upgrade(&node_msgs) {
+ Some(chan_mtx) => {
+ let mut chan_msgs = chan_mtx.lock().unwrap();
+ if let Some(chan_announce) = &chan_msgs.channel_announce {
+ let latest_announce =
+ if *chan_announce.node_id_1() == msg.node_id {
+ &mut chan_msgs.latest_node_announce_a
+ } else {
+ &mut chan_msgs.latest_node_announce_b
+ };
+ if latest_announce.is_none() ||
+ latest_announce.as_ref().unwrap().timestamp() < msg.timestamp
+ {
+ *latest_announce = Some(
+ if let Some(msg) = full_msg { NodeAnnouncement::Full(msg.clone()) }
+ else { NodeAnnouncement::Unsigned(msg.clone()) });
+ }
+ found_at_least_one_chan = true;
+ true
+ } else {
+ debug_assert!(false, "channel_announce is set before struct is added to node map");
+ false
+ }
+ },
+ None => false,
+ }
+ });
+ if e.get().is_empty() { e.remove(); }
+ if found_at_least_one_chan {
+ return Err(LightningError {
+ err: "Awaiting channel_announcement validation to accept node_announcement".to_owned(),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip),
+ });
+ }
+ }
+ Ok(())
+ }
+
+ fn check_replace_previous_entry(msg: &msgs::UnsignedChannelAnnouncement,
+ full_msg: Option<&msgs::ChannelAnnouncement>, replacement: Option<Weak<Mutex<UtxoMessages>>>,
+ pending_channels: &mut HashMap<u64, Weak<Mutex<UtxoMessages>>>
+ ) -> Result<(), msgs::LightningError> {
+ match pending_channels.entry(msg.short_channel_id) {
+ hash_map::Entry::Occupied(mut e) => {
+ // There's already a pending lookup for the given SCID. Check if the messages
+ // are the same and, if so, return immediately (don't bother spawning another
+ // lookup if we haven't gotten that far yet).
+ match Weak::upgrade(&e.get()) {
+ Some(pending_msgs) => {
+ // This may be called with the mutex held on a different UtxoMessages
+ // struct, however in that case we have a global lockorder of new messages
+ // -> old messages, which makes this safe.
+ let pending_matches = match &pending_msgs.unsafe_well_ordered_double_lock_self().channel_announce {
+ Some(ChannelAnnouncement::Full(pending_msg)) => Some(pending_msg) == full_msg,
+ Some(ChannelAnnouncement::Unsigned(pending_msg)) => pending_msg == msg,
+ None => {
+ // This shouldn't actually be reachable. We set the
+ // `channel_announce` field under the same lock as setting the
+ // channel map entry. Still, we can just treat it as
+ // non-matching and let the new request fly.
+ debug_assert!(false);
+ false
+ },
+ };
+ if pending_matches {
+ return Err(LightningError {
+ err: "Channel announcement is already being checked".to_owned(),
+ action: ErrorAction::IgnoreDuplicateGossip,
+ });
+ } else {
+ // The earlier lookup is a different message. If we have another
+ // request in-flight now replace the original.
+ // Note that in the replace case whether to replace is somewhat
+ // arbitrary - both results will be handled, we're just updating the
+ // value that will be compared to future lookups with the same SCID.
+ if let Some(item) = replacement {
+ *e.get_mut() = item;
+ }
+ }
+ },
+ None => {
+ // The earlier lookup already resolved. We can't be sure it's the same
+ // so just remove/replace it and move on.
+ if let Some(item) = replacement {
+ *e.get_mut() = item;
+ } else { e.remove(); }
+ },
+ }
+ },
+ hash_map::Entry::Vacant(v) => {
+ if let Some(item) = replacement { v.insert(item); }
+ },
+ }
+ Ok(())
+ }
+
+ pub(super) fn check_channel_announcement<U: Deref>(&self,
+ utxo_lookup: &Option<U>, msg: &msgs::UnsignedChannelAnnouncement,
+ full_msg: Option<&msgs::ChannelAnnouncement>
+ ) -> Result<Option<u64>, msgs::LightningError> where U::Target: UtxoLookup {
+ let handle_result = |res| {
+ match res {
+ Ok(TxOut { value, script_pubkey }) => {
+ let expected_script =
+ make_funding_redeemscript_from_slices(msg.bitcoin_key_1.as_slice(), msg.bitcoin_key_2.as_slice()).to_v0_p2wsh();
+ if script_pubkey != expected_script {
+ return Err(LightningError{
+ err: format!("Channel announcement key ({}) didn't match on-chain script ({})",
+ expected_script.to_hex(), script_pubkey.to_hex()),
+ action: ErrorAction::IgnoreError
+ });
+ }
+ Ok(Some(value))
+ },
+ Err(UtxoLookupError::UnknownChain) => {
+ Err(LightningError {
+ err: format!("Channel announced on an unknown chain ({})",
+ msg.chain_hash.encode().to_hex()),
+ action: ErrorAction::IgnoreError
+ })
+ },
+ Err(UtxoLookupError::UnknownTx) => {
+ Err(LightningError {
+ err: "Channel announced without corresponding UTXO entry".to_owned(),
+ action: ErrorAction::IgnoreError
+ })
+ },
+ }
+ };
+
+ Self::check_replace_previous_entry(msg, full_msg, None,
+ &mut self.internal.lock().unwrap().channels)?;
+
+ match utxo_lookup {
+ &None => {
+ // Tentatively accept, potentially exposing us to DoS attacks
+ Ok(None)
+ },
+ &Some(ref utxo_lookup) => {
+ match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id) {
+ UtxoResult::Sync(res) => handle_result(res),
+ UtxoResult::Async(future) => {
+ let mut pending_checks = self.internal.lock().unwrap();
+ let mut async_messages = future.state.lock().unwrap();
+ if let Some(res) = async_messages.complete.take() {
+ // In the unlikely event the future resolved before we managed to get it,
+ // handle the result in-line.
+ handle_result(res)
+ } else {
+ Self::check_replace_previous_entry(msg, full_msg,
+ Some(Arc::downgrade(&future.state)), &mut pending_checks.channels)?;
+ async_messages.channel_announce = Some(
+ if let Some(msg) = full_msg { ChannelAnnouncement::Full(msg.clone()) }
+ else { ChannelAnnouncement::Unsigned(msg.clone()) });
+ pending_checks.nodes.entry(msg.node_id_1)
+ .or_insert(Vec::new()).push(Arc::downgrade(&future.state));
+ pending_checks.nodes.entry(msg.node_id_2)
+ .or_insert(Vec::new()).push(Arc::downgrade(&future.state));
+ Err(LightningError {
+ err: "Channel being checked async".to_owned(),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip),
+ })
+ }
+ },
+ }
+ }
+ }
+ }
+
+ /// The maximum number of pending gossip checks before [`Self::too_many_checks_pending`]
+ /// returns `true`. Note that this isn't a strict upper-bound on the number of checks pending -
+ /// each peer may, at a minimum, read one more socket buffer worth of `channel_announcement`s
+ /// which we'll have to process. With a socket buffer of 4KB and a minimum
+ /// `channel_announcement` size of, roughly, 429 bytes, this may leave us with `10*our peer
+ /// count` messages to process beyond this limit. Because we'll probably have a few peers,
+ /// there's no reason for this constant to be materially less than 30 or so, and 32 in-flight
+ /// checks should be more than enough for decent parallelism.
+ const MAX_PENDING_LOOKUPS: usize = 32;
+
+ /// Returns true if there are a large number of async checks pending and future
+ /// `channel_announcement` messages should be delayed. Note that this is only a hint and
+ /// messages already in-flight may still have to be handled for various reasons.
+ pub(super) fn too_many_checks_pending(&self) -> bool {
+ let mut pending_checks = self.internal.lock().unwrap();
+ if pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS {
+ // If we have many channel checks pending, ensure we don't have any dangling checks
+ // (i.e. checks where the user told us they'd call back but drop'd the `UtxoFuture`
+ // instead) before we commit to applying backpressure.
+ pending_checks.channels.retain(|_, chan| {
+ Weak::upgrade(&chan).is_some()
+ });
+ pending_checks.nodes.retain(|_, channels| {
+ channels.retain(|chan| Weak::upgrade(&chan).is_some());
+ !channels.is_empty()
+ });
+ pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS
+ } else {
+ false
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::routing::gossip::tests::*;
+ use crate::util::test_utils::{TestChainSource, TestLogger};
+ use crate::ln::msgs;
+
+ use bitcoin::secp256k1::{Secp256k1, SecretKey};
+
+ use core::sync::atomic::Ordering;
+
+ fn get_network() -> (TestChainSource, NetworkGraph<Box<TestLogger>>) {
+ let logger = Box::new(TestLogger::new());
+ let chain_source = TestChainSource::new(bitcoin::Network::Testnet);
+ let network_graph = NetworkGraph::new(bitcoin::Network::Testnet, logger);
+
+ (chain_source, network_graph)
+ }
+
+ fn get_test_objects() -> (msgs::ChannelAnnouncement, TestChainSource,
+ NetworkGraph<Box<TestLogger>>, bitcoin::Script, msgs::NodeAnnouncement,
+ msgs::NodeAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, msgs::ChannelUpdate)
+ {
+ let secp_ctx = Secp256k1::new();
+
+ let (chain_source, network_graph) = get_network();
+
+ let good_script = get_channel_script(&secp_ctx);
+ let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
+ let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+ let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
+
+ let node_a_announce = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
+ let node_b_announce = get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx);
+
+ // Note that we have to set the "direction" flag correctly on both messages
+ let chan_update_a = get_signed_channel_update(|msg| msg.flags = 0, node_1_privkey, &secp_ctx);
+ let chan_update_b = get_signed_channel_update(|msg| msg.flags = 1, node_2_privkey, &secp_ctx);
+ let chan_update_c = get_signed_channel_update(|msg| {
+ msg.flags = 1; msg.timestamp += 1; }, node_2_privkey, &secp_ctx);
+
+ (valid_announcement, chain_source, network_graph, good_script, node_a_announce,
+ node_b_announce, chan_update_a, chan_update_b, chan_update_c)
+ }
+
+ #[test]
+ fn test_fast_async_lookup() {
+ // Check that async lookups which resolve quicker than the future is returned to the
+ // `get_utxo` call can still resolve properly.
+ let (valid_announcement, chain_source, network_graph, good_script, ..) = get_test_objects();
+
+ let future = UtxoFuture::new();
+ future.resolve_without_forwarding(&network_graph,
+ Ok(TxOut { value: 1_000_000, script_pubkey: good_script }));
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap();
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_some());
+ }
+
+ #[test]
+ fn test_async_lookup() {
+ // Test a simple async lookup
+ let (valid_announcement, chain_source, network_graph, good_script,
+ node_a_announce, node_b_announce, ..) = get_test_objects();
+
+ let future = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel being checked async");
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none());
+
+ future.resolve_without_forwarding(&network_graph,
+ Ok(TxOut { value: 0, script_pubkey: good_script }));
+ network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap();
+ network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap();
+
+ assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1)
+ .unwrap().announcement_info.is_none());
+
+ network_graph.update_node_from_announcement(&node_a_announce).unwrap();
+ network_graph.update_node_from_announcement(&node_b_announce).unwrap();
+
+ assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1)
+ .unwrap().announcement_info.is_some());
+ }
+
+ #[test]
+ fn test_invalid_async_lookup() {
+ // Test an async lookup which returns an incorrect script
+ let (valid_announcement, chain_source, network_graph, ..) = get_test_objects();
+
+ let future = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel being checked async");
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none());
+
+ future.resolve_without_forwarding(&network_graph,
+ Ok(TxOut { value: 1_000_000, script_pubkey: bitcoin::Script::new() }));
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none());
+ }
+
+ #[test]
+ fn test_failing_async_lookup() {
+ // Test an async lookup which returns an error
+ let (valid_announcement, chain_source, network_graph, ..) = get_test_objects();
+
+ let future = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel being checked async");
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none());
+
+ future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx));
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none());
+ }
+
+ #[test]
+ fn test_updates_async_lookup() {
+ // Test async lookups will process pending channel_update/node_announcements once they
+ // complete.
+ let (valid_announcement, chain_source, network_graph, good_script, node_a_announce,
+ node_b_announce, chan_update_a, chan_update_b, ..) = get_test_objects();
+
+ let future = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel being checked async");
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none());
+
+ assert_eq!(
+ network_graph.update_node_from_announcement(&node_a_announce).unwrap_err().err,
+ "Awaiting channel_announcement validation to accept node_announcement");
+ assert_eq!(
+ network_graph.update_node_from_announcement(&node_b_announce).unwrap_err().err,
+ "Awaiting channel_announcement validation to accept node_announcement");
+
+ assert_eq!(network_graph.update_channel(&chan_update_a).unwrap_err().err,
+ "Awaiting channel_announcement validation to accept channel_update");
+ assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err,
+ "Awaiting channel_announcement validation to accept channel_update");
+
+ future.resolve_without_forwarding(&network_graph,
+ Ok(TxOut { value: 1_000_000, script_pubkey: good_script }));
+
+ assert!(network_graph.read_only().channels()
+ .get(&valid_announcement.contents.short_channel_id).unwrap().one_to_two.is_some());
+ assert!(network_graph.read_only().channels()
+ .get(&valid_announcement.contents.short_channel_id).unwrap().two_to_one.is_some());
+
+ assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1)
+ .unwrap().announcement_info.is_some());
+ assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_2)
+ .unwrap().announcement_info.is_some());
+ }
+
+ #[test]
+ fn test_latest_update_async_lookup() {
+ // Test async lookups will process the latest channel_update if two are received while
+ // awaiting an async UTXO lookup.
+ let (valid_announcement, chain_source, network_graph, good_script, _,
+ _, chan_update_a, chan_update_b, chan_update_c, ..) = get_test_objects();
+
+ let future = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ // The announcement is parked pending the async UTXO lookup, so it isn't in the graph yet.
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel being checked async");
+ assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none());
+
+ // Queue several channel_updates while the announcement is still being validated.
+ assert_eq!(network_graph.update_channel(&chan_update_a).unwrap_err().err,
+ "Awaiting channel_announcement validation to accept channel_update");
+ assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err,
+ "Awaiting channel_announcement validation to accept channel_update");
+ assert_eq!(network_graph.update_channel(&chan_update_c).unwrap_err().err,
+ "Awaiting channel_announcement validation to accept channel_update");
+
+ // Completing the lookup should accept the channel and replay the queued updates,
+ // keeping only the most recent update per direction.
+ future.resolve_without_forwarding(&network_graph,
+ Ok(TxOut { value: 1_000_000, script_pubkey: good_script }));
+
+ // a and b share a timestamp; presumably chan_update_c carries a later timestamp for one
+ // direction (see get_test_objects — TODO confirm), so after replay the two directions'
+ // last_update values must differ.
+ assert_eq!(chan_update_a.contents.timestamp, chan_update_b.contents.timestamp);
+ let graph_lock = network_graph.read_only();
+ assert!(graph_lock.channels()
+ .get(&valid_announcement.contents.short_channel_id).as_ref().unwrap()
+ .one_to_two.as_ref().unwrap().last_update !=
+ graph_lock.channels()
+ .get(&valid_announcement.contents.short_channel_id).as_ref().unwrap()
+ .two_to_one.as_ref().unwrap().last_update);
+ }
+
+ #[test]
+ fn test_no_double_lookups() {
+ // Test that a pending async lookup will prevent a second async lookup from flying, but
+ // only if the channel_announcement message is identical.
+ let (valid_announcement, chain_source, network_graph, good_script, ..) = get_test_objects();
+
+ let future = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ // First announcement kicks off exactly one UTXO lookup.
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel being checked async");
+ assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1);
+
+ // If we make a second request with the same message, the call count doesn't increase...
+ let future_b = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future_b.clone());
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel announcement is already being checked");
+ assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1);
+
+ // But if we make a third request with a tweaked message, we should get a second call
+ // against our new future...
+ let secp_ctx = Secp256k1::new();
+ let replacement_pk_1 = &SecretKey::from_slice(&[99; 32]).unwrap();
+ let replacement_pk_2 = &SecretKey::from_slice(&[98; 32]).unwrap();
+ let invalid_announcement = get_signed_channel_announcement(|_| {}, replacement_pk_1, replacement_pk_2, &secp_ctx);
+ assert_eq!(
+ network_graph.update_channel_from_announcement(&invalid_announcement, &Some(&chain_source)).unwrap_err().err,
+ "Channel being checked async");
+ assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 2);
+
+ // Still, if we resolve the original future, the original channel will be accepted.
+ // The stored announcement should be the original (untweaked) one.
+ future.resolve_without_forwarding(&network_graph,
+ Ok(TxOut { value: 1_000_000, script_pubkey: good_script }));
+ assert!(!network_graph.read_only().channels()
+ .get(&valid_announcement.contents.short_channel_id).unwrap()
+ .announcement_message.as_ref().unwrap()
+ .contents.features.supports_unknown_test_feature());
+ }
+
+ #[test]
+ fn test_checks_backpressure() {
+ // Test that too_many_checks_pending returns true when there are many checks pending, and
+ // returns false once they complete.
+ let secp_ctx = Secp256k1::new();
+ let (chain_source, network_graph) = get_network();
+
+ // We cheat and use a single future for all the lookups to complete them all at once.
+ let future = UtxoFuture::new();
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
+
+ let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
+ let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+
+ // Fill up to MAX_PENDING_LOOKUPS; the backpressure flag must not trip until the limit
+ // is exceeded.
+ for i in 0..PendingChecks::MAX_PENDING_LOOKUPS {
+ let valid_announcement = get_signed_channel_announcement(
+ |msg| msg.short_channel_id += 1 + i as u64, node_1_privkey, node_2_privkey, &secp_ctx);
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err();
+ assert!(!network_graph.pending_checks.too_many_checks_pending());
+ }
+
+ // One more pending check pushes us over the limit.
+ let valid_announcement = get_signed_channel_announcement(
+ |_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err();
+ assert!(network_graph.pending_checks.too_many_checks_pending());
+
+ // Once the future completes the "too many checks" flag should reset.
+ future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx));
+ assert!(!network_graph.pending_checks.too_many_checks_pending());
+ }
+
+ #[test]
+ fn test_checks_backpressure_drop() {
+ // Test that too_many_checks_pending returns true when there are many checks pending, and
+ // returns false if we drop some of the futures without completion.
+ let secp_ctx = Secp256k1::new();
+ let (chain_source, network_graph) = get_network();
+
+ // We cheat and use a single future for all the lookups to complete them all at once.
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(UtxoFuture::new());
+
+ let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
+ let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+
+ // Fill up to MAX_PENDING_LOOKUPS; the backpressure flag must not trip until the limit
+ // is exceeded.
+ for i in 0..PendingChecks::MAX_PENDING_LOOKUPS {
+ let valid_announcement = get_signed_channel_announcement(
+ |msg| msg.short_channel_id += 1 + i as u64, node_1_privkey, node_2_privkey, &secp_ctx);
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err();
+ assert!(!network_graph.pending_checks.too_many_checks_pending());
+ }
+
+ // One more pending check pushes us over the limit.
+ let valid_announcement = get_signed_channel_announcement(
+ |_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
+ network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err();
+ assert!(network_graph.pending_checks.too_many_checks_pending());
+
+ // Once the future is drop'd (by resetting the `utxo_ret` value) the "too many checks" flag
+ // should reset to false.
+ *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx));
+ assert!(!network_graph.pending_checks.too_many_checks_pending());
+ }
+}
use crate::prelude::HashMap;
+use super::{LockTestExt, LockHeldState};
+
#[cfg(feature = "backtrace")]
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
}
#[cfg(feature = "backtrace")]
-fn get_construction_location(backtrace: &Backtrace) -> String {
+fn get_construction_location(backtrace: &Backtrace) -> (String, Option<u32>) {
// Find the first frame that is after `debug_sync` (or that is in our tests) and use
// that as the mutex construction site. Note that the first few frames may be in
// the `backtrace` crate, so we have to ignore those.
let symbol_name = symbol.name().unwrap().as_str().unwrap();
if !sync_mutex_constr_regex.is_match(symbol_name) {
if found_debug_sync {
- if let Some(col) = symbol.colno() {
- return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
- } else {
- // Windows debug symbols don't support column numbers, so fall back to
- // line numbers only if no `colno` is available
- return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
- }
+ return (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno());
}
} else { found_debug_sync = true; }
}
#[cfg(feature = "backtrace")]
{
- let lock_constr_location = get_construction_location(&res._lock_construction_bt);
+ let (lock_constr_location, lock_constr_colno) =
+ get_construction_location(&res._lock_construction_bt);
LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
match locks.entry(lock_constr_location) {
- hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
+ hash_map::Entry::Occupied(e) => {
+ assert_eq!(lock_constr_colno,
+ get_construction_location(&e.get()._lock_construction_bt).1,
+ "Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
+ return Arc::clone(e.get())
+ },
hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
}
}
res
}
- // Returns whether we were a recursive lock (only relevant for read)
- fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
- let mut inserted = false;
+ fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
LOCKS_HELD.with(|held| {
// For each lock which is currently locked, check that no lock's locked-before
// set includes the lock we're about to lock, which would imply a lockorder
// inversion.
for (locked_idx, _locked) in held.borrow().iter() {
- if read && *locked_idx == this.lock_idx {
- // Recursive read locks are explicitly allowed
- return;
+ if *locked_idx == this.lock_idx {
+ // Note that with `feature = "backtrace"` set, we may be looking at different
+ // instances of the same lock. Still, doing so is quite risky, a total order
+ // must be maintained, and doing so across a set of otherwise-identical mutexes
+ // is fraught with issues.
+ #[cfg(feature = "backtrace")]
+ debug_assert!(_double_lock_self_allowed,
+ "Tried to acquire a lock while it was held!\nLock constructed at {}",
+ get_construction_location(&this._lock_construction_bt).0);
+ #[cfg(not(feature = "backtrace"))]
+ panic!("Tried to acquire a lock while it was held!");
}
}
- for (locked_idx, locked) in held.borrow().iter() {
- if !read && *locked_idx == this.lock_idx {
- // With `feature = "backtrace"` set, we may be looking at different instances
- // of the same lock.
- debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
- }
+ for (_locked_idx, locked) in held.borrow().iter() {
for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
#[cfg(feature = "backtrace")]
panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
- get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
- get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
+ get_construction_location(&this._lock_construction_bt).0,
+ this.lock_idx, this._lock_construction_bt,
+ get_construction_location(&locked._lock_construction_bt).0,
+ locked.lock_idx, locked._lock_construction_bt,
_locked_dep._lockdep_trace);
#[cfg(not(feature = "backtrace"))]
panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
}
}
held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
- inserted = true;
});
- inserted
}
- fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
- fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
+ fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
+ let mut res = LockHeldState::NotHeldByThread;
+ LOCKS_HELD.with(|held| {
+ for (locked_idx, _locked) in held.borrow().iter() {
+ if *locked_idx == this.lock_idx {
+ res = LockHeldState::HeldByThread;
+ }
+ }
+ });
+ res
+ }
fn try_locked(this: &Arc<LockMetadata>) {
LOCKS_HELD.with(|held| {
inner: StdMutex<T>,
deps: Arc<LockMetadata>,
}
+impl<T: Sized> Mutex<T> {
+ /// Consumes the mutex and returns the inner value, mirroring
+ /// `std::sync::Mutex::into_inner` but mapping poison errors to `()`.
+ pub(crate) fn into_inner(self) -> LockResult<T> {
+ self.inner.into_inner().map_err(|_| ())
+ }
+}
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
}
pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
- LockMetadata::pre_lock(&self.deps);
+ LockMetadata::pre_lock(&self.deps, false);
self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
}
}
}
+impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState {
+ LockMetadata::held_by_thread(&self.deps)
+ }
+ type ExclLock = MutexGuard<'a, T>;
+ // Passing `true` to `pre_lock` suppresses the "lock taken while already held" assertion,
+ // permitting a deliberate, well-ordered double-lock of the same mutex.
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> {
+ LockMetadata::pre_lock(&self.deps, true);
+ self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap()
+ }
+}
+
pub struct RwLock<T: Sized> {
inner: StdRwLock<T>,
deps: Arc<LockMetadata>,
pub struct RwLockReadGuard<'a, T: Sized + 'a> {
lock: &'a RwLock<T>,
- first_lock: bool,
guard: StdRwLockReadGuard<'a, T>,
}
impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
fn drop(&mut self) {
- if !self.first_lock {
- // Note that its not strictly true that the first taken read lock will get unlocked
- // last, but in practice our locks are always taken as RAII, so it should basically
- // always be true.
- return;
- }
LOCKS_HELD.with(|held| {
held.borrow_mut().remove(&self.lock.deps.lock_idx);
});
}
pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
- let first_lock = LockMetadata::pre_read_lock(&self.deps);
- self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
+ // Note that while we could be taking a recursive read lock here, Rust's `RwLock` may
+ // deadlock trying to take a second read lock if another thread is waiting on the write
+ // lock. This behavior is platform dependent, but our in-tree `FairRwLock` guarantees
+ // such a deadlock.
+ LockMetadata::pre_lock(&self.deps, false);
+ self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
}
pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
- LockMetadata::pre_lock(&self.deps);
+ LockMetadata::pre_lock(&self.deps, false);
self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
}
}
}
+impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState {
+ LockMetadata::held_by_thread(&self.deps)
+ }
+ type ExclLock = RwLockWriteGuard<'a, T>;
+ // As with Mutex, `true` here allows a deliberate double-lock; the exclusive (write)
+ // guard is taken so the caller holds the lock exclusively.
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
+ LockMetadata::pre_lock(&self.deps, true);
+ self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).unwrap()
+ }
+}
+
pub type FairRwLock<T> = RwLock<T>;
--- /dev/null
+use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use super::{LockHeldState, LockTestExt};
+
+/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
+/// Linux with pthreads under the hood, readers trivially and completely starve writers).
+/// Because we often hold read locks while doing message processing in multiple threads which
+/// can use significant CPU time, with write locks being time-sensitive but relatively small in
+/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
+/// especially during initial graph sync.
+///
+/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
+/// wrapper here. Its not particularly optimized, but provides some reasonable fairness by
+/// blocking readers (by taking the write lock) if there are writers pending when we go to take
+/// a read lock.
+pub struct FairRwLock<T> {
+ lock: RwLock<T>,
+ // Count of writers currently blocked in `write()`; `read()` checks this to decide
+ // whether to queue behind the write lock for fairness.
+ waiting_writers: AtomicUsize,
+}
+
+impl<T> FairRwLock<T> {
+ /// Constructs a new `FairRwLock` wrapping `t`.
+ pub fn new(t: T) -> Self {
+ Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
+ }
+
+ // Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
+ // ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
+ // memory.
+ /// Takes the write lock, registering as a pending writer first so that concurrent
+ /// `read()` calls queue behind us rather than starving the writer.
+ pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
+ self.waiting_writers.fetch_add(1, Ordering::Relaxed);
+ let res = self.lock.write();
+ self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
+ res
+ }
+
+ /// Takes the read lock. If any writers are pending, first acquires (and immediately
+ /// drops) the write lock, which blocks this reader until the pending writers have run.
+ pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
+ if self.waiting_writers.load(Ordering::Relaxed) != 0 {
+ let _write_queue_lock = self.lock.write();
+ }
+ // Note that we don't consider ensuring that an underlying RwLock allowing writers to
+ // starve readers doesn't exhibit the same behavior here. I'm not aware of any
+ // libstd-backing RwLock which exhibits this behavior, and as documented in the
+ // struct-level documentation, it shouldn't pose a significant issue for our current
+ // codebase.
+ self.lock.read()
+ }
+
+ #[allow(dead_code)]
+ /// Attempts the write lock without blocking. No fairness bookkeeping is needed since a
+ /// failed try does not wait (and thus cannot be starved).
+ pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
+ self.lock.try_write()
+ }
+}
+
+impl<'a, T: 'a> LockTestExt<'a> for FairRwLock<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState {
+ // FairRwLock is only built in non-test modes, so held-by-thread tracking is
+ // unavailable here.
+ LockHeldState::Unsupported
+ }
+ type ExclLock = RwLockWriteGuard<'a, T>;
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
+ self.write().unwrap()
+ }
+}
+#[allow(dead_code)] // Depending on the compilation flags some variants are never used
+#[derive(Debug, PartialEq, Eq)]
+pub(crate) enum LockHeldState {
+ /// The calling thread currently holds the lock.
+ HeldByThread,
+ /// The calling thread does not currently hold the lock.
+ NotHeldByThread,
+ /// The sync implementation in use does not support held-by-thread tracking
+ /// (production/bench lock implementations).
+ #[cfg(any(feature = "_bench_unstable", not(test)))]
+ Unsupported,
+}
+
+pub(crate) trait LockTestExt<'a> {
+ /// Returns whether the calling thread currently holds this lock, where the active sync
+ /// implementation supports tracking it.
+ fn held_by_thread(&self) -> LockHeldState;
+ /// The exclusive-guard type returned by `unsafe_well_ordered_double_lock_self`.
+ type ExclLock;
+ /// If two instances of the same mutex are being taken at the same time, it's very easy to have
+ /// a lockorder inversion and risk deadlock. Thus, we default to disabling such locks.
+ ///
+ /// However, sometimes they cannot be avoided. In such cases, this method exists to take a
+ /// mutex while avoiding a test failure. It is deliberately verbose and includes the term
+ /// "unsafe" to indicate that special care needs to be taken to ensure no deadlocks are
+ /// possible.
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> Self::ExclLock;
+}
+
#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
mod debug_sync;
#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
mod test_lockorder_checks;
#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
-pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub(crate) mod fairrwlock;
+#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
+
#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
-pub use crate::util::fairrwlock::FairRwLock;
+// Production/bench builds use std's lock types directly; lockorder tracking is unavailable,
+// so these impls report `Unsupported` and a "double lock" is just a plain lock.
+mod ext_impl {
+ use super::*;
+ impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
+ type ExclLock = MutexGuard<'a, T>;
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> { self.lock().unwrap() }
+ }
+ impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
+ type ExclLock = RwLockWriteGuard<'a, T>;
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<T> { self.write().unwrap() }
+ }
+}
#[cfg(not(feature = "std"))]
mod nostd_sync;
use core::ops::{Deref, DerefMut};
use core::time::Duration;
use core::cell::{RefCell, Ref, RefMut};
+use super::{LockTestExt, LockHeldState};
pub type LockResult<Guard> = Result<Guard, ()>;
pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
Ok(MutexGuard { lock: self.inner.borrow_mut() })
}
+
+ pub fn into_inner(self) -> LockResult<T> {
+ Ok(self.inner.into_inner())
+ }
+}
+
+impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState {
+ // NOTE(review): relies on `lock()` returning `Err` when the lock is already held
+ // (the no-std Mutex is RefCell-backed) — confirm it errors rather than panics on a
+ // re-entrant borrow.
+ if self.lock().is_err() { return LockHeldState::HeldByThread; }
+ else { return LockHeldState::NotHeldByThread; }
+ }
+ type ExclLock = MutexGuard<'a, T>;
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> { self.lock().unwrap() }
+}
pub struct RwLock<T: ?Sized> {
}
}
+impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState {
+ // NOTE(review): same caveat as Mutex above — relies on `write()` erroring (not
+ // panicking) when the lock is already held.
+ if self.write().is_err() { return LockHeldState::HeldByThread; }
+ else { return LockHeldState::NotHeldByThread; }
+ }
+ type ExclLock = RwLockWriteGuard<'a, T>;
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<T> { self.write().unwrap() }
+}
+
pub type FairRwLock<T> = RwLock<T>;
use crate::sync::debug_sync::{RwLock, Mutex};
+use super::{LockHeldState, LockTestExt};
+
+use std::sync::Arc;
+
#[test]
#[should_panic]
#[cfg(not(feature = "backtrace"))]
}
#[test]
+#[should_panic]
+#[cfg(not(feature = "backtrace"))]
fn recursive_read() {
let lock = RwLock::new(());
let _a = lock.read().unwrap();
}
}
-#[test]
-fn read_recursive_no_lockorder() {
- // Like the above, but note that no lockorder is implied when we recursively read-lock a
- // RwLock, causing this to pass just fine.
- let a = RwLock::new(());
- let b = RwLock::new(());
- let _outer = a.read().unwrap();
- {
- let _a = a.read().unwrap();
- let _b = b.read().unwrap();
- }
- {
- let _b = b.read().unwrap();
- let _a = a.read().unwrap();
- }
-}
-
#[test]
#[should_panic]
fn read_write_lockorder_fail() {
let _a = a.write().unwrap();
}
}
+
+#[test]
+fn test_thread_locked_state() {
+ // held_by_thread() tracks lock state per-thread: holding a lock on one thread must not
+ // make it appear held on another.
+ let mtx = Arc::new(Mutex::new(()));
+ let mtx_ref = Arc::clone(&mtx);
+ assert_eq!(mtx.held_by_thread(), LockHeldState::NotHeldByThread);
+
+ let lck = mtx.lock().unwrap();
+ assert_eq!(mtx.held_by_thread(), LockHeldState::HeldByThread);
+
+ // A second thread must observe the lock as not held by *it*.
+ let thrd = std::thread::spawn(move || {
+ assert_eq!(mtx_ref.held_by_thread(), LockHeldState::NotHeldByThread);
+ });
+ thrd.join().unwrap();
+ assert_eq!(mtx.held_by_thread(), LockHeldState::HeldByThread);
+
+ // Dropping the guard releases the lock and resets the held state.
+ std::mem::drop(lck);
+ assert_eq!(mtx.held_by_thread(), LockHeldState::NotHeldByThread);
+}
/// too-many-hops, etc).
InvalidRoute {
/// A human-readable error message
- err: &'static str
+ err: String
},
/// We were unable to complete the request as the Channel required to do so is unable to
/// complete the request (or was not found). This can take many forms, including disconnected
}
}
+// (De)serialization for APIError. The variant/field TLV type numbers are part of the
+// serialized format and must remain stable for backwards compatibility.
+impl_writeable_tlv_based_enum_upgradable!(APIError,
+ (0, APIMisuseError) => { (0, err, required), },
+ (2, FeeRateTooHigh) => {
+ (0, err, required),
+ (2, feerate, required),
+ },
+ (4, InvalidRoute) => { (0, err, required), },
+ (6, ChannelUnavailable) => { (0, err, required), },
+ (8, MonitorUpdateInProgress) => {},
+ (10, IncompatibleShutdownScript) => { (0, script, required), },
+);
+
#[inline]
pub(crate) fn get_onion_debug_field(error_code: u16) -> (&'static str, usize) {
match error_code & 0xff {
use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
use crate::ln::features::ChannelTypeFeatures;
use crate::ln::msgs;
-use crate::ln::msgs::DecodeError;
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::routing::gossip::NetworkUpdate;
-use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, WithoutLength, OptionDeserWrapper};
+use crate::util::errors::APIError;
+use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength};
use crate::routing::router::{RouteHop, RouteParameters};
use bitcoin::{PackedLockTime, Transaction};
(2, SpontaneousPayment)
);
+/// When the payment path failure took place and extra details about it. [`PathFailure::OnPath`] may
+/// contain a [`NetworkUpdate`] that needs to be applied to the [`NetworkGraph`].
+///
+/// [`NetworkUpdate`]: crate::routing::gossip::NetworkUpdate
+/// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
+// Serialized via the `impl_writeable_tlv_based_enum_upgradable` macro below.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum PathFailure {
+ /// We failed to initially send the payment and no HTLC was committed to. Contains the relevant
+ /// error.
+ InitialSend {
+ /// The error surfaced from initial send.
+ err: APIError,
+ },
+ /// A hop on the path failed to forward our payment.
+ OnPath {
+ /// If present, this [`NetworkUpdate`] should be applied to the [`NetworkGraph`] so that routing
+ /// decisions can take into account the update.
+ ///
+ /// [`NetworkUpdate`]: crate::routing::gossip::NetworkUpdate
+ /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
+ network_update: Option<NetworkUpdate>,
+ },
+}
+
+// (De)serialization for PathFailure: `OnPath` is TLV type 0, `InitialSend` type 2. These
+// type numbers are part of the serialized format and must remain stable across versions.
+impl_writeable_tlv_based_enum_upgradable!(PathFailure,
+ (0, OnPath) => {
+ (0, network_update, upgradable_option),
+ },
+ (2, InitialSend) => {
+ (0, err, upgradable_required),
+ },
+);
+
#[derive(Clone, Debug, PartialEq, Eq)]
/// The reason the channel was closed. See individual variants more details.
pub enum ClosureReason {
/// Note for MPP payments: in rare cases, this event may be preceded by a `PaymentPathFailed`
/// event. In this situation, you SHOULD treat this payment as having succeeded.
PaymentSent {
- /// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::retry_payment`].
+ /// The id returned by [`ChannelManager::send_payment`].
///
/// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
payment_id: Option<PaymentId>,
/// The preimage to the hash given to ChannelManager::send_payment.
/// Note that this serves as a payment receipt, if you wish to have such a thing, you must
fee_paid_msat: Option<u64>,
},
/// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
- /// provide failure information for each MPP part in the payment.
+ /// provide failure information for each path attempt in the payment, including retries.
///
/// This event is provided once there are no further pending HTLCs for the payment and the
- /// payment is no longer retryable due to [`ChannelManager::abandon_payment`] having been
- /// called for the corresponding payment.
+ /// payment is no longer retryable, due either to the [`Retry`] provided or
+ /// [`ChannelManager::abandon_payment`] having been called for the corresponding payment.
///
+ /// [`Retry`]: crate::ln::channelmanager::Retry
/// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
PaymentFailed {
/// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
+ /// [`ChannelManager::abandon_payment`].
///
/// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
/// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
payment_id: PaymentId,
/// The hash that was given to [`ChannelManager::send_payment`].
/// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
/// [`Event::PaymentSent`] for obtaining the payment preimage.
PaymentPathSuccessful {
- /// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::retry_payment`].
+ /// The id returned by [`ChannelManager::send_payment`].
///
/// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
payment_id: PaymentId,
/// The hash that was given to [`ChannelManager::send_payment`].
///
/// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
path: Vec<RouteHop>,
},
- /// Indicates an outbound HTLC we sent failed. Probably some intermediary node dropped
- /// something. You may wish to retry with a different route.
- ///
- /// If you have given up retrying this payment and wish to fail it, you MUST call
- /// [`ChannelManager::abandon_payment`] at least once for a given [`PaymentId`] or memory
- /// related to payment tracking will leak.
+ /// Indicates an outbound HTLC we sent failed, likely due to an intermediary node being unable to
+ /// handle the HTLC.
///
/// Note that this does *not* indicate that all paths for an MPP payment have failed, see
- /// [`Event::PaymentFailed`] and [`all_paths_failed`].
+ /// [`Event::PaymentFailed`].
+ ///
+ /// See [`ChannelManager::abandon_payment`] for giving up on this payment before its retries have
+ /// been exhausted.
///
/// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- /// [`all_paths_failed`]: Self::PaymentPathFailed::all_paths_failed
PaymentPathFailed {
/// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
+ /// [`ChannelManager::abandon_payment`].
///
/// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
/// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
payment_id: Option<PaymentId>,
/// The hash that was given to [`ChannelManager::send_payment`].
/// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
payment_hash: PaymentHash,
/// Indicates the payment was rejected for some reason by the recipient. This implies that
- /// the payment has failed, not just the route in question. If this is not set, you may
- /// retry the payment via a different route.
+ /// the payment has failed, not just the route in question. If this is not set, the payment may
+ /// be retried via a different route.
payment_failed_permanently: bool,
- /// Any failure information conveyed via the Onion return packet by a node along the failed
- /// payment route.
- ///
- /// Should be applied to the [`NetworkGraph`] so that routing decisions can take into
- /// account the update.
+ /// Extra error details based on the failure type. May contain an update that needs to be
+ /// applied to the [`NetworkGraph`].
///
/// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
- network_update: Option<NetworkUpdate>,
- /// For both single-path and multi-path payments, this is set if all paths of the payment have
- /// failed. This will be set to false if (1) this is an MPP payment and (2) other parts of the
- /// larger MPP payment were still in flight when this event was generated.
- ///
- /// Note that if you are retrying individual MPP parts, using this value to determine if a
- /// payment has fully failed is race-y. Because multiple failures can happen prior to events
- /// being processed, you may retry in response to a first failure, with a second failure
- /// (with `all_paths_failed` set) still pending. Then, when the second failure is processed
- /// you will see `all_paths_failed` set even though the retry of the first failure still
- /// has an associated in-flight HTLC. See (1) for an example of such a failure.
- ///
- /// If you wish to retry individual MPP parts and learn when a payment has failed, you must
- /// call [`ChannelManager::abandon_payment`] and wait for a [`Event::PaymentFailed`] event.
- ///
- /// (1) <https://github.com/lightningdevkit/rust-lightning/issues/1164>
- ///
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- all_paths_failed: bool,
+ failure: PathFailure,
/// The payment path that failed.
path: Vec<RouteHop>,
/// The channel responsible for the failed payment path.
/// If this is `Some`, then the corresponding channel should be avoided when the payment is
/// retried. May be `None` for older [`Event`] serializations.
short_channel_id: Option<u64>,
- /// Parameters needed to compute a new [`Route`] when retrying the failed payment path.
- ///
- /// See [`find_route`] for details.
- ///
- /// [`Route`]: crate::routing::router::Route
- /// [`find_route`]: crate::routing::router::find_route
- retry: Option<RouteParameters>,
#[cfg(test)]
error_code: Option<u16>,
#[cfg(test)]
});
},
&Event::PaymentPathFailed {
- ref payment_id, ref payment_hash, ref payment_failed_permanently, ref network_update,
- ref all_paths_failed, ref path, ref short_channel_id, ref retry,
+ ref payment_id, ref payment_hash, ref payment_failed_permanently, ref failure,
+ ref path, ref short_channel_id,
#[cfg(test)]
ref error_code,
#[cfg(test)]
error_data.write(writer)?;
write_tlv_fields!(writer, {
(0, payment_hash, required),
- (1, network_update, option),
+ (1, None::<NetworkUpdate>, option), // network_update in LDK versions prior to 0.0.114
(2, payment_failed_permanently, required),
- (3, all_paths_failed, required),
+ (3, false, required), // all_paths_failed in LDK versions prior to 0.0.114
(5, *path, vec_type),
(7, short_channel_id, option),
- (9, retry, option),
+ (9, None::<RouteParameters>, option), // retry in LDK versions prior to 0.0.115
(11, payment_id, option),
+ (13, failure, required),
});
},
&Event::PendingHTLCsForwardable { time_forwardable: _ } => {
let mut payment_hash = PaymentHash([0; 32]);
let mut payment_failed_permanently = false;
let mut network_update = None;
- let mut all_paths_failed = Some(true);
let mut path: Option<Vec<RouteHop>> = Some(vec![]);
let mut short_channel_id = None;
- let mut retry = None;
let mut payment_id = None;
+ let mut failure_opt = None;
read_tlv_fields!(reader, {
(0, payment_hash, required),
- (1, network_update, ignorable),
+ (1, network_update, upgradable_option),
(2, payment_failed_permanently, required),
- (3, all_paths_failed, option),
(5, path, vec_type),
(7, short_channel_id, option),
- (9, retry, option),
(11, payment_id, option),
+ (13, failure_opt, upgradable_option),
});
+ let failure = failure_opt.unwrap_or_else(|| PathFailure::OnPath { network_update });
Ok(Some(Event::PaymentPathFailed {
payment_id,
payment_hash,
payment_failed_permanently,
- network_update,
- all_paths_failed: all_paths_failed.unwrap(),
+ failure,
path: path.unwrap(),
short_channel_id,
- retry,
#[cfg(test)]
error_code,
#[cfg(test)]
9u8 => {
let f = || {
let mut channel_id = [0; 32];
- let mut reason = None;
+ let mut reason = UpgradableRequired(None);
let mut user_channel_id_low_opt: Option<u64> = None;
let mut user_channel_id_high_opt: Option<u64> = None;
read_tlv_fields!(reader, {
(0, channel_id, required),
(1, user_channel_id_low_opt, option),
- (2, reason, ignorable),
+ (2, reason, upgradable_required),
(3, user_channel_id_high_opt, option),
});
- if reason.is_none() { return Ok(None); }
// `user_channel_id` used to be a single u64 value. In order to remain
// backwards compatible with versions prior to 0.0.113, the u128 is serialized
let user_channel_id = (user_channel_id_low_opt.unwrap_or(0) as u128) +
((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);
- Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: reason.unwrap() }))
+ Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required) }))
};
f()
},
19u8 => {
let f = || {
let mut payment_hash = PaymentHash([0; 32]);
- let mut purpose = None;
+ let mut purpose = UpgradableRequired(None);
let mut amount_msat = 0;
let mut receiver_node_id = None;
read_tlv_fields!(reader, {
(0, payment_hash, required),
(1, receiver_node_id, option),
- (2, purpose, ignorable),
+ (2, purpose, upgradable_required),
(4, amount_msat, required),
});
- if purpose.is_none() { return Ok(None); }
Ok(Some(Event::PaymentClaimed {
receiver_node_id,
payment_hash,
- purpose: purpose.unwrap(),
+ purpose: _init_tlv_based_struct_field!(purpose, upgradable_required),
amount_msat,
}))
};
25u8 => {
let f = || {
let mut prev_channel_id = [0; 32];
- let mut failed_next_destination_opt = None;
+ let mut failed_next_destination_opt = UpgradableRequired(None);
read_tlv_fields!(reader, {
(0, prev_channel_id, required),
- (2, failed_next_destination_opt, ignorable),
+ (2, failed_next_destination_opt, upgradable_required),
});
- if let Some(failed_next_destination) = failed_next_destination_opt {
- Ok(Some(Event::HTLCHandlingFailed {
- prev_channel_id,
- failed_next_destination,
- }))
- } else {
- // If we fail to read a `failed_next_destination` assume it's because
- // `MaybeReadable::read` returned `Ok(None)`, though it's also possible we
- // were simply missing the field.
- Ok(None)
- }
+ Ok(Some(Event::HTLCHandlingFailed {
+ prev_channel_id,
+ failed_next_destination: _init_tlv_based_struct_field!(failed_next_destination_opt, upgradable_required),
+ }))
};
f()
},
let f = || {
let mut channel_id = [0; 32];
let mut user_channel_id: u128 = 0;
- let mut counterparty_node_id = OptionDeserWrapper(None);
- let mut channel_type = OptionDeserWrapper(None);
+ let mut counterparty_node_id = RequiredWrapper(None);
+ let mut channel_type = RequiredWrapper(None);
read_tlv_fields!(reader, {
(0, channel_id, required),
(2, user_channel_id, required),
/// The channel_announcement which should be sent.
msg: msgs::ChannelAnnouncement,
/// The followup channel_update which should be sent.
- update_msg: msgs::ChannelUpdate,
+ update_msg: Option<msgs::ChannelUpdate>,
},
/// Used to indicate that a channel_update should be broadcast to all peers.
BroadcastChannelUpdate {
/// The channel_update which should be sent.
msg: msgs::ChannelUpdate,
},
+ /// Used to indicate that a node_announcement should be broadcast to all peers.
+ BroadcastNodeAnnouncement {
+ /// The node_announcement which should be sent.
+ msg: msgs::NodeAnnouncement,
+ },
/// Used to indicate that a channel_update should be sent to a single peer.
/// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is a
/// private channel and we shouldn't be informing all of our peers of channel parameters.
+++ /dev/null
-use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
-/// Linux with pthreads under the hood, readers trivially and completely starve writers).
-/// Because we often hold read locks while doing message processing in multiple threads which
-/// can use significant CPU time, with write locks being time-sensitive but relatively small in
-/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
-/// especially during initial graph sync.
-///
-/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
-/// wrapper here. Its not particularly optimized, but provides some reasonable fairness by
-/// blocking readers (by taking the write lock) if there are writers pending when we go to take
-/// a read lock.
-pub struct FairRwLock<T> {
- lock: RwLock<T>,
- waiting_writers: AtomicUsize,
-}
-
-impl<T> FairRwLock<T> {
- pub fn new(t: T) -> Self {
- Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
- }
-
- // Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
- // ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
- // memory.
- pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
- self.waiting_writers.fetch_add(1, Ordering::Relaxed);
- let res = self.lock.write();
- self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
- res
- }
-
- pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
- if self.waiting_writers.load(Ordering::Relaxed) != 0 {
- let _write_queue_lock = self.lock.write();
- }
- // Note that we don't consider ensuring that an underlying RwLock allowing writers to
- // starve readers doesn't exhibit the same behavior here. I'm not aware of any
- // libstd-backing RwLock which exhibits this behavior, and as documented in the
- // struct-level documentation, it shouldn't pose a significant issue for our current
- // codebase.
- self.lock.read()
- }
-
- pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
- self.lock.try_write()
- }
-}
//! This module has a map which can be iterated in a deterministic order. See the [`IndexedMap`].
use crate::prelude::{HashMap, hash_map};
-use alloc::collections::{BTreeSet, btree_set};
+use alloc::vec::Vec;
+use alloc::slice::Iter;
use core::hash::Hash;
use core::cmp::Ord;
-use core::ops::RangeBounds;
+use core::ops::{Bound, RangeBounds};
/// A map which can be iterated in a deterministic order.
///
/// actually backed by a [`HashMap`], with some additional tracking to ensure we can iterate over
/// keys in the order defined by [`Ord`].
///
+/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
+///
/// [`BTreeMap`]: alloc::collections::BTreeMap
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Eq)]
pub struct IndexedMap<K: Hash + Ord, V> {
map: HashMap<K, V>,
- // TODO: Explore swapping this for a sorted vec (that is only sorted on first range() call)
- keys: BTreeSet<K>,
+ keys: Vec<K>,
}
impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> {
pub fn new() -> Self {
Self {
map: HashMap::new(),
- keys: BTreeSet::new(),
+ keys: Vec::new(),
}
}
pub fn remove(&mut self, key: &K) -> Option<V> {
let ret = self.map.remove(key);
if let Some(_) = ret {
- assert!(self.keys.remove(key), "map and keys must be consistent");
+ let idx = self.keys.iter().position(|k| k == key).expect("map and keys must be consistent");
+ self.keys.remove(idx);
}
ret
}
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let ret = self.map.insert(key.clone(), value);
if ret.is_none() {
- assert!(self.keys.insert(key), "map and keys must be consistent");
+ self.keys.push(key);
}
ret
}
}
/// Returns an iterator which iterates over the `key`/`value` pairs in a given range.
- pub fn range<R: RangeBounds<K>>(&self, range: R) -> Range<K, V> {
+ pub fn range<R: RangeBounds<K>>(&mut self, range: R) -> Range<K, V> {
+ self.keys.sort_unstable();
+ let start = match range.start_bound() {
+ Bound::Unbounded => 0,
+ Bound::Included(key) => self.keys.binary_search(key).unwrap_or_else(|index| index),
+ Bound::Excluded(key) => self.keys.binary_search(key).and_then(|index| Ok(index + 1)).unwrap_or_else(|index| index),
+ };
+ let end = match range.end_bound() {
+ Bound::Unbounded => self.keys.len(),
+ Bound::Included(key) => self.keys.binary_search(key).and_then(|index| Ok(index + 1)).unwrap_or_else(|index| index),
+ Bound::Excluded(key) => self.keys.binary_search(key).unwrap_or_else(|index| index),
+ };
+
Range {
- inner_range: self.keys.range(range),
+ inner_range: self.keys[start..end].iter(),
map: &self.map,
}
}
}
}
+impl<K: Hash + Ord + PartialEq, V: PartialEq> PartialEq for IndexedMap<K, V> {
+ fn eq(&self, other: &Self) -> bool {
+ self.map == other.map
+ }
+}
+
/// An iterator over a range of values in an [`IndexedMap`]
+///
+/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
pub struct Range<'a, K: Hash + Ord, V> {
- inner_range: btree_set::Range<'a, K>,
+ inner_range: Iter<'a, K>,
map: &'a HashMap<K, V>,
}
impl<'a, K: Hash + Ord, V: 'a> Iterator for Range<'a, K, V> {
}
/// An [`Entry`] for a key which currently has no value
+///
+/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
pub struct VacantEntry<'a, K: Hash + Ord, V> {
#[cfg(feature = "hashbrown")]
underlying_entry: hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>,
#[cfg(not(feature = "hashbrown"))]
underlying_entry: hash_map::VacantEntry<'a, K, V>,
key: K,
- keys: &'a mut BTreeSet<K>,
+ keys: &'a mut Vec<K>,
}
/// An [`Entry`] for an existing key-value pair
+///
+/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
pub struct OccupiedEntry<'a, K: Hash + Ord, V> {
#[cfg(feature = "hashbrown")]
underlying_entry: hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>,
#[cfg(not(feature = "hashbrown"))]
underlying_entry: hash_map::OccupiedEntry<'a, K, V>,
- keys: &'a mut BTreeSet<K>,
+ keys: &'a mut Vec<K>,
}
/// A mutable reference to a position in the map. This can be used to reference, add, or update the
/// value at a fixed key.
+///
+/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
pub enum Entry<'a, K: Hash + Ord, V> {
/// A mutable reference to a position within the map where there is no value.
Vacant(VacantEntry<'a, K, V>),
impl<'a, K: Hash + Ord, V> VacantEntry<'a, K, V> {
/// Insert a value into the position described by this entry.
pub fn insert(self, value: V) -> &'a mut V {
- assert!(self.keys.insert(self.key), "map and keys must be consistent");
+ self.keys.push(self.key);
self.underlying_entry.insert(value)
}
}
/// Remove the value at the position described by this entry.
pub fn remove_entry(self) -> (K, V) {
let res = self.underlying_entry.remove_entry();
- assert!(self.keys.remove(&res.0), "map and keys must be consistent");
+ let idx = self.keys.iter().position(|k| k == &res.0).expect("map and keys must be consistent");
+ self.keys.remove(idx);
res
}
($logger: expr, $lvl:expr, $($arg:tt)+) => (
match $lvl {
#[cfg(not(any(feature = "max_level_off")))]
- $crate::util::logger::Level::Error => log_internal!($logger, $lvl, $($arg)*),
+ $crate::util::logger::Level::Error => $crate::log_internal!($logger, $lvl, $($arg)*),
#[cfg(not(any(feature = "max_level_off", feature = "max_level_error")))]
- $crate::util::logger::Level::Warn => log_internal!($logger, $lvl, $($arg)*),
+ $crate::util::logger::Level::Warn => $crate::log_internal!($logger, $lvl, $($arg)*),
#[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn")))]
- $crate::util::logger::Level::Info => log_internal!($logger, $lvl, $($arg)*),
+ $crate::util::logger::Level::Info => $crate::log_internal!($logger, $lvl, $($arg)*),
#[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info")))]
- $crate::util::logger::Level::Debug => log_internal!($logger, $lvl, $($arg)*),
+ $crate::util::logger::Level::Debug => $crate::log_internal!($logger, $lvl, $($arg)*),
#[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug")))]
- $crate::util::logger::Level::Trace => log_internal!($logger, $lvl, $($arg)*),
+ $crate::util::logger::Level::Trace => $crate::log_internal!($logger, $lvl, $($arg)*),
#[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug", feature = "max_level_trace")))]
- $crate::util::logger::Level::Gossip => log_internal!($logger, $lvl, $($arg)*),
+ $crate::util::logger::Level::Gossip => $crate::log_internal!($logger, $lvl, $($arg)*),
#[cfg(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug", feature = "max_level_trace"))]
_ => {
#[macro_export]
macro_rules! log_error {
($logger: expr, $($arg:tt)*) => (
- log_given_level!($logger, $crate::util::logger::Level::Error, $($arg)*);
+ $crate::log_given_level!($logger, $crate::util::logger::Level::Error, $($arg)*);
)
}
#[macro_export]
macro_rules! log_warn {
($logger: expr, $($arg:tt)*) => (
- log_given_level!($logger, $crate::util::logger::Level::Warn, $($arg)*);
+ $crate::log_given_level!($logger, $crate::util::logger::Level::Warn, $($arg)*);
)
}
#[macro_export]
macro_rules! log_info {
($logger: expr, $($arg:tt)*) => (
- log_given_level!($logger, $crate::util::logger::Level::Info, $($arg)*);
+ $crate::log_given_level!($logger, $crate::util::logger::Level::Info, $($arg)*);
)
}
#[macro_export]
macro_rules! log_debug {
($logger: expr, $($arg:tt)*) => (
- log_given_level!($logger, $crate::util::logger::Level::Debug, $($arg)*);
+ $crate::log_given_level!($logger, $crate::util::logger::Level::Debug, $($arg)*);
)
}
#[macro_export]
macro_rules! log_trace {
($logger: expr, $($arg:tt)*) => (
- log_given_level!($logger, $crate::util::logger::Level::Trace, $($arg)*)
+ $crate::log_given_level!($logger, $crate::util::logger::Level::Trace, $($arg)*)
)
}
#[macro_export]
macro_rules! log_gossip {
($logger: expr, $($arg:tt)*) => (
- log_given_level!($logger, $crate::util::logger::Level::Gossip, $($arg)*);
+ $crate::log_given_level!($logger, $crate::util::logger::Level::Gossip, $($arg)*);
)
}
pub(crate) mod atomic_counter;
pub(crate) mod byte_utils;
pub(crate) mod chacha20;
-#[cfg(all(any(feature = "_bench_unstable", not(test)), feature = "std"))]
-pub(crate) mod fairrwlock;
#[cfg(fuzzing)]
pub mod zbase32;
#[cfg(not(fuzzing))]
/// Writer that only tracks the amount of data written - useful if you need to calculate the length
/// of some data when serialized but don't yet need the full data.
+///
+/// (C-not exported) as manual TLV building is not currently supported in bindings
pub struct LengthCalculatingWriter(pub usize);
impl Writer for LengthCalculatingWriter {
#[inline]
/// Essentially [`std::io::Take`] but a bit simpler and with a method to walk the underlying stream
/// forward to ensure we always consume exactly the fixed length specified.
+///
+/// (C-not exported) as manual TLV building is not currently supported in bindings
pub struct FixedLengthReader<R: Read> {
read: R,
bytes_read: u64,
/// A [`Read`] implementation which tracks whether any bytes have been read at all. This allows us to distinguish
/// between "EOF reached before we started" and "EOF reached mid-read".
+///
+/// (C-not exported) as manual TLV building is not currently supported in bindings
pub struct ReadTrackingReader<R: Read> {
read: R,
/// Returns whether we have read from this reader or not yet.
}
/// Wrapper to read a required (non-optional) TLV record.
-pub struct OptionDeserWrapper<T: Readable>(pub Option<T>);
-impl<T: Readable> Readable for OptionDeserWrapper<T> {
+///
+/// (C-not exported) as manual TLV building is not currently supported in bindings
+pub struct RequiredWrapper<T>(pub Option<T>);
+impl<T: Readable> Readable for RequiredWrapper<T> {
#[inline]
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
Ok(Self(Some(Readable::read(reader)?)))
}
}
+impl<A, T: ReadableArgs<A>> ReadableArgs<A> for RequiredWrapper<T> {
+ #[inline]
+ fn read<R: Read>(reader: &mut R, args: A) -> Result<Self, DecodeError> {
+ Ok(Self(Some(ReadableArgs::read(reader, args)?)))
+ }
+}
/// When handling `default_values`, we want to map the default-value T directly
-/// to a `OptionDeserWrapper<T>` in a way that works for `field: T = t;` as
+/// to a `RequiredWrapper<T>` in a way that works for `field: T = t;` as
/// well. Thus, we assume `Into<T> for T` does nothing and use that.
-impl<T: Readable> From<T> for OptionDeserWrapper<T> {
- fn from(t: T) -> OptionDeserWrapper<T> { OptionDeserWrapper(Some(t)) }
+impl<T> From<T> for RequiredWrapper<T> {
+ fn from(t: T) -> RequiredWrapper<T> { RequiredWrapper(Some(t)) }
+}
+
+/// Wrapper to read a required (non-optional) TLV record that may have been upgraded without
+/// backwards compat.
+///
+/// (C-not exported) as manual TLV building is not currently supported in bindings
+pub struct UpgradableRequired<T: MaybeReadable>(pub Option<T>);
+impl<T: MaybeReadable> MaybeReadable for UpgradableRequired<T> {
+ #[inline]
+ fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+ let tlv = MaybeReadable::read(reader)?;
+ if let Some(tlv) = tlv { return Ok(Some(Self(Some(tlv)))) }
+ Ok(None)
+ }
}
pub(crate) struct U48(pub u64);
/// A type for variable-length values within TLV record where the length is encoded as part of the record.
/// Used to prevent encoding the length twice.
+///
+/// (C-not exported) as manual TLV building is not currently supported in bindings
pub struct WithoutLength<T>(pub T);
impl Writeable for WithoutLength<&String> {
}
impl_for_vec!(ecdsa::Signature);
+impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction);
impl_for_vec!((A, B), A, B);
impl Writeable for Script {
hostname.write(&mut buf).unwrap();
assert_eq!(Hostname::read(&mut buf.as_slice()).unwrap().as_str(), "test");
}
+
+ #[test]
+ fn bigsize_encoding_decoding() {
+ let values = vec![0, 252, 253, 65535, 65536, 4294967295, 4294967296, 18446744073709551615];
+ let bytes = vec![
+ "00",
+ "fc",
+ "fd00fd",
+ "fdffff",
+ "fe00010000",
+ "feffffffff",
+ "ff0000000100000000",
+ "ffffffffffffffffff"
+ ];
+ for i in 0..=7 {
+ let mut stream = crate::io::Cursor::new(::hex::decode(bytes[i]).unwrap());
+ assert_eq!(super::BigSize::read(&mut stream).unwrap().0, values[i]);
+ let mut stream = super::VecWriter(Vec::new());
+ super::BigSize(values[i]).write(&mut stream).unwrap();
+ assert_eq!(stream.0, ::hex::decode(bytes[i]).unwrap());
+ }
+ let err_bytes = vec![
+ "fd00fc",
+ "fe0000ffff",
+ "ff00000000ffffffff",
+ "fd00",
+ "feffff",
+ "ffffffffff",
+ "fd",
+ "fe",
+ "ff",
+ ""
+ ];
+ for i in 0..=9 {
+ let mut stream = crate::io::Cursor::new(::hex::decode(err_bytes[i]).unwrap());
+ if i < 3 {
+ assert_eq!(super::BigSize::read(&mut stream).err(), Some(crate::ln::msgs::DecodeError::InvalidValue));
+ } else {
+ assert_eq!(super::BigSize::read(&mut stream).err(), Some(crate::ln::msgs::DecodeError::ShortRead));
+ }
+ }
+ }
}
field.write($stream)?;
}
};
- ($stream: expr, $type: expr, $field: expr, ignorable) => {
+ ($stream: expr, $type: expr, $field: expr, upgradable_required) => {
$crate::_encode_tlv!($stream, $type, $field, required);
};
+ ($stream: expr, $type: expr, $field: expr, upgradable_option) => {
+ $crate::_encode_tlv!($stream, $type, $field, option);
+ };
($stream: expr, $type: expr, $field: expr, (option, encoding: ($fieldty: ty, $encoding: ident))) => {
$crate::_encode_tlv!($stream, $type, $field.map(|f| $encoding(f)), option);
};
($stream: expr, $type: expr, $field: expr, (option, encoding: $fieldty: ty)) => {
$crate::_encode_tlv!($stream, $type, $field, option);
};
+ ($stream: expr, $type: expr, $field: expr, (option: $trait: ident $(, $read_arg: expr)?)) => {
+ // Just a read-mapped type
+ $crate::_encode_tlv!($stream, $type, $field, option);
+ };
}
/// Panics if the last seen TLV type is not numerically less than the TLV type currently being checked.
$len.0 += field_len;
}
};
- ($len: expr, $type: expr, $field: expr, ignorable) => {
+ ($len: expr, $type: expr, $field: expr, (option: $trait: ident $(, $read_arg: expr)?)) => {
+ $crate::_get_varint_length_prefixed_tlv_length!($len, $type, $field, option);
+ };
+ ($len: expr, $type: expr, $field: expr, upgradable_required) => {
$crate::_get_varint_length_prefixed_tlv_length!($len, $type, $field, required);
};
+ ($len: expr, $type: expr, $field: expr, upgradable_option) => {
+ $crate::_get_varint_length_prefixed_tlv_length!($len, $type, $field, option);
+ };
}
/// See the documentation of [`write_tlv_fields`].
return Err(DecodeError::InvalidValue);
}
}};
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (required: $trait: ident $(, $read_arg: expr)?)) => {{
+ $crate::_check_decoded_tlv_order!($last_seen_type, $typ, $type, $field, required);
+ }};
($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, option) => {{
// no-op
}};
($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, vec_type) => {{
// no-op
}};
- ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, ignorable) => {{
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, upgradable_required) => {{
+ _check_decoded_tlv_order!($last_seen_type, $typ, $type, $field, required)
+ }};
+ ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, upgradable_option) => {{
// no-op
}};
($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
return Err(DecodeError::InvalidValue);
}
}};
+ ($last_seen_type: expr, $type: expr, $field: ident, (required: $trait: ident $(, $read_arg: expr)?)) => {{
+ $crate::_check_missing_tlv!($last_seen_type, $type, $field, required);
+ }};
($last_seen_type: expr, $type: expr, $field: ident, vec_type) => {{
// no-op
}};
($last_seen_type: expr, $type: expr, $field: ident, option) => {{
// no-op
}};
- ($last_seen_type: expr, $type: expr, $field: ident, ignorable) => {{
+ ($last_seen_type: expr, $type: expr, $field: ident, upgradable_required) => {{
+ _check_missing_tlv!($last_seen_type, $type, $field, required)
+ }};
+ ($last_seen_type: expr, $type: expr, $field: ident, upgradable_option) => {{
// no-op
}};
($last_seen_type: expr, $type: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
($reader: expr, $field: ident, required) => {{
$field = $crate::util::ser::Readable::read(&mut $reader)?;
}};
+ ($reader: expr, $field: ident, (required: $trait: ident $(, $read_arg: expr)?)) => {{
+ $field = $trait::read(&mut $reader $(, $read_arg)*)?;
+ }};
($reader: expr, $field: ident, vec_type) => {{
let f: $crate::util::ser::WithoutLength<Vec<_>> = $crate::util::ser::Readable::read(&mut $reader)?;
$field = Some(f.0);
($reader: expr, $field: ident, option) => {{
$field = Some($crate::util::ser::Readable::read(&mut $reader)?);
}};
- ($reader: expr, $field: ident, ignorable) => {{
+ // `upgradable_required` indicates we're reading a required TLV that may have been upgraded
+ // without backwards compat. We'll error if the field is missing, and return `Ok(None)` if the
+ // field is present but we can no longer understand it.
+ // Note that this variant can only be used within a `MaybeReadable` read.
+ ($reader: expr, $field: ident, upgradable_required) => {{
+ $field = match $crate::util::ser::MaybeReadable::read(&mut $reader)? {
+ Some(res) => res,
+ _ => return Ok(None)
+ };
+ }};
+ // `upgradable_option` indicates we're reading an Option-al TLV that may have been upgraded
+ // without backwards compat. $field will be None if the TLV is missing or if the field is present
+ // but we can no longer understand it.
+ ($reader: expr, $field: ident, upgradable_option) => {{
$field = $crate::util::ser::MaybeReadable::read(&mut $reader)?;
}};
($reader: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
macro_rules! _decode_tlv_stream_range {
($stream: expr, $range: expr, $rewind: ident, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
$(, $decode_custom_tlv: expr)?) => { {
- use core::ops::RangeBounds;
use $crate::ln::msgs::DecodeError;
let mut last_seen_type: Option<u64> = None;
let mut stream_ref = $stream;
}
},
Err(e) => return Err(e),
- Ok(t) => if $range.contains(&t.0) { t } else {
+ Ok(t) => if core::ops::RangeBounds::contains(&$range, &t.0) { t } else {
drop(tracking_reader);
// Assumes the type id is minimally encoded, which is enforced on read.
($field: ident, option) => {
$field
};
- ($field: ident, ignorable) => {
- if $field.is_none() { return Ok(None); } else { $field.unwrap() }
+ ($field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {
+ $crate::_init_tlv_based_struct_field!($field, option)
+ };
+ ($field: ident, upgradable_required) => {
+ $field.0.unwrap()
+ };
+ ($field: ident, upgradable_option) => {
+ $field
};
($field: ident, required) => {
$field.0.unwrap()
#[macro_export]
macro_rules! _init_tlv_field_var {
($field: ident, (default_value, $default: expr)) => {
- let mut $field = $crate::util::ser::OptionDeserWrapper(None);
+ let mut $field = $crate::util::ser::RequiredWrapper(None);
};
($field: ident, (static_value, $value: expr)) => {
let $field;
};
($field: ident, required) => {
- let mut $field = $crate::util::ser::OptionDeserWrapper(None);
+ let mut $field = $crate::util::ser::RequiredWrapper(None);
+ };
+ ($field: ident, (required: $trait: ident $(, $read_arg: expr)?)) => {
+ $crate::_init_tlv_field_var!($field, required);
};
($field: ident, vec_type) => {
let mut $field = Some(Vec::new());
($field: ident, option) => {
let mut $field = None;
};
- ($field: ident, ignorable) => {
+ ($field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {
+ $crate::_init_tlv_field_var!($field, option);
+ };
+ ($field: ident, upgradable_required) => {
+ let mut $field = $crate::util::ser::UpgradableRequired(None);
+ };
+ ($field: ident, upgradable_option) => {
let mut $field = None;
};
}
($type:ty) => { &'a $type };
}
+#[doc(hidden)]
+#[macro_export]
macro_rules! _impl_writeable_tlv_based_enum_common {
($st: ident, $(($variant_id: expr, $variant_name: ident) =>
{$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
$($st::$variant_name { $(ref $field),* } => {
let id: u8 = $variant_id;
id.write(writer)?;
- write_tlv_fields!(writer, {
+ $crate::write_tlv_fields!(writer, {
$(($type, *$field, $fieldty)),*
});
}),*
{$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
),* $(,)*;
$(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*) => {
- _impl_writeable_tlv_based_enum_common!($st,
+ $crate::_impl_writeable_tlv_based_enum_common!($st,
$(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*;
$(($tuple_variant_id, $tuple_variant_name)),*);
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
let f = || {
- _init_and_read_tlv_fields!(reader, {
+ $crate::_init_and_read_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
Ok($st::$variant_name {
$(
- $field: _init_tlv_based_struct_field!($field, $fieldty)
+ $field: $crate::_init_tlv_based_struct_field!($field, $fieldty)
),*
})
};
),* $(,)*
$(;
$(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*)*) => {
- _impl_writeable_tlv_based_enum_common!($st,
+ $crate::_impl_writeable_tlv_based_enum_common!($st,
$(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*;
$($(($tuple_variant_id, $tuple_variant_name)),*)*);
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
let f = || {
- _init_and_read_tlv_fields!(reader, {
+ $crate::_init_and_read_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
Ok(Some($st::$variant_name {
$(
- $field: _init_tlv_based_struct_field!($field, $fieldty)
+ $field: $crate::_init_tlv_based_struct_field!($field, $fieldty)
),*
}))
};
Ok(Some($st::$tuple_variant_name(Readable::read(reader)?)))
}),*)*
_ if id % 2 == 1 => Ok(None),
- _ => Err(DecodeError::UnknownRequiredFeature),
+ _ => Err($crate::ln::msgs::DecodeError::UnknownRequiredFeature),
}
}
}
(0xdeadbeef1badbeef, 0x1bad1dea, Some(0x01020304)));
}
+ #[derive(Debug, PartialEq)]
+ struct TestUpgradable {
+ a: u32,
+ b: u32,
+ c: Option<u32>,
+ }
+
+ fn upgradable_tlv_reader(s: &[u8]) -> Result<Option<TestUpgradable>, DecodeError> {
+ let mut s = Cursor::new(s);
+ let mut a = 0;
+ let mut b = 0;
+ let mut c: Option<u32> = None;
+ decode_tlv_stream!(&mut s, {(2, a, upgradable_required), (3, b, upgradable_required), (4, c, upgradable_option)});
+ Ok(Some(TestUpgradable { a, b, c, }))
+ }
+
+ #[test]
+ fn upgradable_tlv_simple_good_cases() {
+ assert_eq!(upgradable_tlv_reader(&::hex::decode(
+ concat!("0204deadbeef", "03041bad1dea", "0404deadbeef")
+ ).unwrap()[..]).unwrap(),
+ Some(TestUpgradable { a: 0xdeadbeef, b: 0x1bad1dea, c: Some(0xdeadbeef) }));
+
+ assert_eq!(upgradable_tlv_reader(&::hex::decode(
+ concat!("0204deadbeef", "03041bad1dea")
+ ).unwrap()[..]).unwrap(),
+ Some(TestUpgradable { a: 0xdeadbeef, b: 0x1bad1dea, c: None}));
+ }
+
+ #[test]
+ fn missing_required_upgradable() {
+ if let Err(DecodeError::InvalidValue) = upgradable_tlv_reader(&::hex::decode(
+ concat!("0100", "0204deadbeef")
+ ).unwrap()[..]) {
+ } else { panic!(); }
+ if let Err(DecodeError::InvalidValue) = upgradable_tlv_reader(&::hex::decode(
+ concat!("0100", "03041bad1dea")
+ ).unwrap()[..]) {
+ } else { panic!(); }
+ }
+
// BOLT TLV test cases
fn tlv_reader_n1(s: &[u8]) -> Result<(Option<HighZeroBytesDroppedBigSize<u64>>, Option<u64>, Option<(PublicKey, u64, u64)>, Option<u16>), DecodeError> {
let mut s = Cursor::new(s);
use crate::ln::{msgs, wire};
use crate::ln::msgs::LightningError;
use crate::ln::script::ShutdownScript;
-use crate::routing::gossip::NetworkGraph;
-use crate::routing::gossip::NodeId;
+use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
+use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
use crate::routing::router::{find_route, InFlightHtlcs, Route, RouteHop, RouteParameters, Router, ScorerAccountingForInFlightHtlcs};
-use crate::routing::scoring::FixedPenaltyScorer;
+use crate::routing::scoring::{ChannelUsage, Score};
use crate::util::config::UserConfig;
use crate::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use crate::util::events;
use crate::io;
use crate::prelude::*;
+use core::cell::RefCell;
use core::time::Duration;
use crate::sync::{Mutex, Arc};
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
// Test `Router` which hands out pre-canned routes when any are queued, and
// otherwise falls back to a real `find_route` over `network_graph`.
pub struct TestRouter<'a> {
pub network_graph: Arc<NetworkGraph<&'a TestLogger>>,
// FIFO of expected (query, canned result) pairs consumed by route lookups.
pub next_routes: Mutex<VecDeque<(RouteParameters, Result<Route, LightningError>)>>,
+ // Scorer consulted for each hop of returned routes, letting tests register
+ // per-channel usage expectations via `TestScorer::expect_usage`.
+ pub scorer: &'a Mutex<TestScorer>,
}
impl<'a> TestRouter<'a> {
- pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>) -> Self {
- Self { network_graph, next_routes: Mutex::new(VecDeque::new()), }
+ pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, scorer: &'a Mutex<TestScorer>) -> Self {
+ Self { network_graph, next_routes: Mutex::new(VecDeque::new()), scorer }
}
pub fn expect_find_route(&self, query: RouteParameters, result: Result<Route, LightningError>) {
) -> Result<Route, msgs::LightningError> {
if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
assert_eq!(find_route_query, *params);
+ if let Ok(ref route) = find_route_res {
+ let locked_scorer = self.scorer.lock().unwrap();
+ let scorer = ScorerAccountingForInFlightHtlcs::new(locked_scorer, inflight_htlcs);
+ for path in &route.paths {
+ let mut aggregate_msat = 0u64;
+ for (idx, hop) in path.iter().rev().enumerate() {
+ aggregate_msat += hop.fee_msat;
+ let usage = ChannelUsage {
+ amount_msat: aggregate_msat,
+ inflight_htlc_msat: 0,
+ effective_capacity: EffectiveCapacity::Unknown,
+ };
+
+ // Since the path is reversed, the last element in our iteration is the first
+ // hop.
+ if idx == path.len() - 1 {
+ scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage);
+ } else {
+ let curr_hop_path_idx = path.len() - 1 - idx;
+ scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage);
+ }
+ }
+ }
+ }
return find_route_res;
}
let logger = TestLogger::new();
+ let scorer = self.scorer.lock().unwrap();
find_route(
payer, params, &self.network_graph, first_hops, &logger,
- &ScorerAccountingForInFlightHtlcs::new(TestScorer::with_penalty(0), &inflight_htlcs),
+ &ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs),
&[42; 32]
)
}
- fn notify_payment_path_failed(&self, _path: &[&RouteHop], _short_channel_id: u64) {}
- fn notify_payment_path_successful(&self, _path: &[&RouteHop]) {}
- fn notify_payment_probe_successful(&self, _path: &[&RouteHop]) {}
- fn notify_payment_probe_failed(&self, _path: &[&RouteHop], _short_channel_id: u64) {}
}
-#[cfg(feature = "std")] // If we put this on the `if`, we get "attributes are not yet allowed on `if` expressions" on 1.41.1
impl<'a> Drop for TestRouter<'a> {
fn drop(&mut self) {
- if std::thread::panicking() {
- return;
+ #[cfg(feature = "std")] {
+ if std::thread::panicking() {
+ return;
+ }
}
assert!(self.next_routes.lock().unwrap().is_empty());
}
pub struct TestChannelMessageHandler {
pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>,
+ connected_peers: Mutex<HashSet<PublicKey>>,
}
impl TestChannelMessageHandler {
TestChannelMessageHandler {
pending_events: Mutex::new(Vec::new()),
expected_recv_msgs: Mutex::new(None),
+ connected_peers: Mutex::new(HashSet::new()),
}
}
fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
self.received_msg(wire::Message::ChannelReestablish(msg.clone()));
}
- fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
- fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) -> Result<(), ()> {
+ fn peer_disconnected(&self, their_node_id: &PublicKey) {
+ assert!(self.connected_peers.lock().unwrap().remove(their_node_id));
+ }
+ fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
+ assert!(self.connected_peers.lock().unwrap().insert(their_node_id.clone()));
// Don't bother with `received_msg` for Init as it's auto-generated and we don't want to
// bother re-generating the expected Init message in all tests.
Ok(())
None
}
- fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+ fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
if !init_msg.features.supports_gossip_queries() {
return Ok(());
}
features.set_gossip_queries_optional();
features
}
+
+ fn processing_queue_high(&self) -> bool { false }
}
impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
pub struct TestChainSource {
pub genesis_hash: BlockHash,
- pub utxo_ret: Mutex<Result<TxOut, chain::AccessError>>,
+ pub utxo_ret: Mutex<UtxoResult>,
+ pub get_utxo_call_count: AtomicUsize,
pub watched_txn: Mutex<HashSet<(Txid, Script)>>,
pub watched_outputs: Mutex<HashSet<(OutPoint, Script)>>,
}
let script_pubkey = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
Self {
genesis_hash: genesis_block(network).block_hash(),
- utxo_ret: Mutex::new(Ok(TxOut { value: u64::max_value(), script_pubkey })),
+ utxo_ret: Mutex::new(UtxoResult::Sync(Ok(TxOut { value: u64::max_value(), script_pubkey }))),
+ get_utxo_call_count: AtomicUsize::new(0),
watched_txn: Mutex::new(HashSet::new()),
watched_outputs: Mutex::new(HashSet::new()),
}
}
}
-impl chain::Access for TestChainSource {
- fn get_utxo(&self, genesis_hash: &BlockHash, _short_channel_id: u64) -> Result<TxOut, chain::AccessError> {
+// The `chain::Access` trait was renamed to `UtxoLookup`, with results wrapped
+// in `UtxoResult` (the `Sync` variant suggests a non-sync variant exists
+// elsewhere — confirm against `routing::utxo`).
+impl UtxoLookup for TestChainSource {
+ fn get_utxo(&self, genesis_hash: &BlockHash, _short_channel_id: u64) -> UtxoResult {
+ // Count every lookup so tests can assert how many times we were queried.
+ self.get_utxo_call_count.fetch_add(1, Ordering::Relaxed);
if self.genesis_hash != *genesis_hash {
- return Err(chain::AccessError::UnknownChain);
+ // Queries for a chain other than ours always fail.
+ return UtxoResult::Sync(Err(UtxoLookupError::UnknownChain));
}
+ // Otherwise return whatever result the test configured in `utxo_ret`.
self.utxo_ret.lock().unwrap().clone()
}
}
-/// A scorer useful in testing, when the passage of time isn't a concern.
-pub type TestScorer = FixedPenaltyScorer;
+// Scorer for tests: always returns a penalty of 0 while optionally asserting
+// that channels are scored in an expected order with expected usage.
+pub struct TestScorer {
+ /// Stores a tuple of (scid, ChannelUsage). `None` means no expectations were
+ /// registered; any unconsumed entries cause a panic on drop.
+ scorer_expectations: RefCell<Option<VecDeque<(u64, ChannelUsage)>>>,
+}
+
+impl TestScorer {
+ // Creates a scorer with no registered expectations (scoring is unchecked).
+ pub fn new() -> Self {
+ Self {
+ scorer_expectations: RefCell::new(None),
+ }
+ }
+
+ // Queues an expectation: the next scored channel must be `scid` with exactly
+ // `expectation` as its usage (asserted FIFO in `channel_penalty_msat`).
+ pub fn expect_usage(&self, scid: u64, expectation: ChannelUsage) {
+ self.scorer_expectations.borrow_mut().get_or_insert_with(|| VecDeque::new()).push_back((scid, expectation));
+ }
+}
+
+// Stub serialization, only compiled under `c_bindings` (presumably to satisfy a
+// `Writeable` bound on scorers there — confirm against the `Score` trait).
+// Tests never serialize a `TestScorer`, hence `unreachable!()`.
+#[cfg(c_bindings)]
+impl crate::util::ser::Writeable for TestScorer {
+ fn write<W: crate::util::ser::Writer>(&self, _: &mut W) -> Result<(), crate::io::Error> { unreachable!(); }
+}
+
+impl Score for TestScorer {
+ // Pops the next queued (scid, usage) expectation, if any, and asserts this
+ // call matches it. Always returns a penalty of 0 msat.
+ fn channel_penalty_msat(
+ &self, short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage
+ ) -> u64 {
+ if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() {
+ match scorer_expectations.pop_front() {
+ Some((scid, expectation)) => {
+ assert_eq!(expectation, usage);
+ assert_eq!(scid, short_channel_id);
+ },
+ // More channels scored than expectations registered: tolerated.
+ None => {},
+ }
+ }
+ 0
+ }
+
+ // Payment/probe feedback callbacks are intentional no-ops for the test scorer.
+ fn payment_path_failed(&mut self, _actual_path: &[&RouteHop], _actual_short_channel_id: u64) {}
+
+ fn payment_path_successful(&mut self, _actual_path: &[&RouteHop]) {}
+
+ fn probe_failed(&mut self, _actual_path: &[&RouteHop], _: u64) {}
+
+ fn probe_successful(&mut self, _actual_path: &[&RouteHop]) {}
+}
+
+impl Drop for TestScorer {
+ fn drop(&mut self) {
+ // If the test is already panicking, don't compound it with a second
+ // panic about unmet expectations.
+ #[cfg(feature = "std")] {
+ if std::thread::panicking() {
+ return;
+ }
+ }
+
+ // Fail the test if any registered usage expectation was never consumed.
+ if let Some(scorer_expectations) = self.scorer_expectations.borrow().as_ref() {
+ if !scorer_expectations.is_empty() {
+ panic!("Unsatisfied scorer expectations: {:?}", scorer_expectations)
+ }
+ }
+ }
+}
pub(crate) fn notify(&self) {
let mut lock = self.notify_pending.lock().unwrap();
if let Some(future_state) = &lock.1 {
- future_state.lock().unwrap().complete();
+ // If completing the outstanding future made (or had already made) a
+ // counting callback, the notification has been delivered: drop the
+ // future and skip setting the pending flag so the next `get_future`
+ // starts fresh rather than pre-completed.
+ if future_state.lock().unwrap().complete() {
+ lock.1 = None;
+ return;
+ }
}
// Nothing consumed the notification yet; record it as pending.
lock.0 = true;
mem::drop(lock);
}
impl FutureState {
// Fires all pending callbacks and marks the state complete. Changed to
// return whether any callback which "counts as a call" has ever fired
// (`callbacks_made`), so `notify()` can tell whether the notification was
// actually delivered to a consumer.
- fn complete(&mut self) {
+ fn complete(&mut self) -> bool {
for (counts_as_call, callback) in self.callbacks.drain(..) {
callback.call();
self.callbacks_made |= counts_as_call;
}
self.complete = true;
+ self.callbacks_made
}
}
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
assert!(!notifier.wait_timeout(Duration::from_millis(1)));
}
+
+ #[test]
+ fn test_poll_post_notify_completes() {
+ // Tests that if we have a future state that has completed, and we haven't yet requested a
+ // new future, if we get a notify prior to requesting that second future it is generated
+ // pre-completed.
+ let notifier = Notifier::new();
+
+ // Notify first: the future handed out afterwards is already complete, and
+ // polling it must not wake the waker.
+ notifier.notify();
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
+ assert!(!woken.load(Ordering::SeqCst));
+
+ // A second notify-before-get_future behaves identically.
+ notifier.notify();
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
+ assert!(!woken.load(Ordering::SeqCst));
+
+ // With no pending notification, a fresh future starts out pending...
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
+ assert!(!woken.load(Ordering::SeqCst));
+
+ // ...until the next notify wakes it and it resolves.
+ notifier.notify();
+ assert!(woken.load(Ordering::SeqCst));
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
+ }
+
+ #[test]
+ fn test_poll_post_notify_completes_initial_notified() {
+ // Identical to the previous test, but the first future completes via a wake rather than an
+ // immediate `Poll::Ready`.
+ let notifier = Notifier::new();
+
+ // First future is requested before any notify, so it starts out pending.
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
+
+ // The notify wakes the stored waker and the future then resolves.
+ notifier.notify();
+ assert!(woken.load(Ordering::SeqCst));
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
+
+ // A notify issued before requesting the next future pre-completes it; no wake.
+ notifier.notify();
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
+ assert!(!woken.load(Ordering::SeqCst));
+
+ // With the notification consumed, a further future is pending again...
+ let mut future = notifier.get_future();
+ let (woken, waker) = create_waker();
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
+ assert!(!woken.load(Ordering::SeqCst));
+
+ // ...and resolves via a wake on the next notify.
+ notifier.notify();
+ assert!(woken.load(Ordering::SeqCst));
+ assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
+ }
}
lightning-invoice = { path = "../lightning-invoice", default-features = false }
lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync", default-features = false }
lightning-background-processor = { path = "../lightning-background-processor", features = ["futures"], default-features = false }
+
+# Obviously lightning-transaction-sync doesn't support no-std, but it should build
+# even if lightning is built with no-std.
+lightning-transaction-sync = { path = "../lightning-transaction-sync", optional = true }
+++ /dev/null
-## API Updates
-- The functions `inbound_payment::{create, create_from_hash}` and
- `channelmanager::{create_inbound_payment, create_inbound_payment_for_hash}` now accept a
- `min_final_cltv_expiry_delta` argument. This encodes the `min_final_cltv_expiry_delta` in the
- payment secret metadata bytes to be validated on payment receipt.
-
-## Backwards Compatibility
-- If `min_final_cltv_expiry_delta` set for any of `inbound_payment::{create, create_from_hash}` or
- `channelmanager::{create_inbound_payment, create_inbound_payment_for_hash}` then the payment will
- not be receivable on versions of LDK prior to 0.0.114.
-
--- /dev/null
+## API Updates
+
+- `Event::PaymentPathFailed::retry` will always be `None` if we initiate a payment on 0.0.115
+ then downgrade to an earlier version (#2063)