strategy:
fail-fast: false
matrix:
- platform: [ ubuntu-latest ]
- toolchain: [ stable,
- beta,
- # 1.41.1 is MSRV for Rust-Lightning, lightning-invoice, and lightning-persister
- 1.41.1,
- # 1.45.2 is MSRV for lightning-net-tokio, lightning-block-sync, lightning-background-processor
- 1.45.2,
- # 1.47.0 will be the MSRV for no-std builds using hashbrown once core2 is updated
- 1.47.0]
+ platform: [ ubuntu-latest, windows-latest, macos-latest ]
+ toolchain: [ stable, beta ]
include:
- toolchain: stable
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- build-tx-sync: true
+ platform: ubuntu-latest
coverage: true
- - toolchain: stable
- platform: macos-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- build-tx-sync: true
- - toolchain: stable
- test-custom-message: true
- - toolchain: beta
- platform: macos-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- build-tx-sync: true
- - toolchain: stable
- platform: windows-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- build-tx-sync: false
- - toolchain: beta
+ # 1.48.0 is the MSRV for all crates except lightning-transaction-sync; Windows and macOS need newer toolchains (see below)
+ - toolchain: 1.48.0
+ platform: ubuntu-latest
+ # Windows requires 1.49.0 because that's the MSRV for supported Tokio
+ - toolchain: 1.49.0
platform: windows-latest
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- build-tx-sync: false
- - toolchain: beta
- build-net-tokio: true
- build-no-std: true
- build-futures: true
- build-tx-sync: true
- - toolchain: beta
- test-custom-message: true
- - toolchain: 1.41.1
- build-no-std: false
- test-log-variants: true
- build-futures: false
- build-tx-sync: false
- - toolchain: 1.45.2
- build-net-old-tokio: true
- build-net-tokio: true
- build-no-std: false
- build-futures: true
- build-tx-sync: false
- - toolchain: 1.47.0
- build-futures: true
- build-no-std: true
- build-tx-sync: false
+ # macos-latest requires 1.54.0 because earlier toolchains fail to link properly there
+ - toolchain: 1.54.0
+ platform: macos-latest
runs-on: ${{ matrix.platform }}
steps:
- name: Checkout source code
toolchain: ${{ matrix.toolchain }}
override: true
profile: minimal
- - name: Pin tokio to 1.14 for Rust 1.45
- if: "matrix.build-net-old-tokio"
- run: cargo update -p tokio --precise "1.14.0" --verbose
- env:
- CARGO_NET_GIT_FETCH_WITH_CLI: "true"
- - name: Build on Rust ${{ matrix.toolchain }} with net-tokio
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: cargo build --verbose --color always
- - name: Build on Rust ${{ matrix.toolchain }} with net-tokio, and full code-linking for coverage generation
- if: matrix.coverage
- run: RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always
- - name: Build on Rust ${{ matrix.toolchain }}
- if: "! matrix.build-net-tokio"
+ - name: Install no-std-check dependencies for ARM Embedded
+ if: "matrix.platform == 'ubuntu-latest'"
run: |
- cargo build --verbose --color always -p lightning
- cargo build --verbose --color always -p lightning-invoice
- cargo build --verbose --color always -p lightning-persister
- - name: Build on Rust ${{ matrix.toolchain }} with all Log-Limiting features
- if: matrix.test-log-variants
- run: |
- cd lightning
- for FEATURE in $(cat Cargo.toml | grep '^max_level_' | awk '{ print $1 }'); do
- cargo build --verbose --color always --features $FEATURE
- done
- - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: |
- cd lightning-block-sync
- cargo build --verbose --color always --features rest-client
- cargo build --verbose --color always --features rpc-client
- cargo build --verbose --color always --features rpc-client,rest-client
- cargo build --verbose --color always --features rpc-client,rest-client,tokio
- - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
- if: matrix.coverage
- run: |
- cd lightning-block-sync
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rest-client
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client,tokio
- - name: Build Transaction Sync Clients on Rust ${{ matrix.toolchain }} with features
- if: "matrix.build-tx-sync && !matrix.coverage"
- run: |
- cd lightning-transaction-sync
- cargo build --verbose --color always --features esplora-blocking
- cargo build --verbose --color always --features esplora-async
- cargo build --verbose --color always --features esplora-async-https
- - name: Build transaction sync clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
- if: "matrix.build-tx-sync && matrix.coverage"
- run: |
- cd lightning-transaction-sync
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features esplora-blocking
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features esplora-async
- RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features esplora-async-https
- - name: Test transaction sync clients on Rust ${{ matrix.toolchain }} with features
- if: "matrix.build-tx-sync"
- run: |
- cd lightning-transaction-sync
- cargo test --verbose --color always --features esplora-blocking
- cargo test --verbose --color always --features esplora-async
- cargo test --verbose --color always --features esplora-async-https
- - name: Test backtrace-debug builds on Rust ${{ matrix.toolchain }}
- if: "matrix.toolchain == 'stable'"
- shell: bash # Default on Winblows is powershell
- run: |
- cd lightning && RUST_BACKTRACE=1 cargo test --verbose --color always --features backtrace
- - name: Test on Rust ${{ matrix.toolchain }} with net-tokio
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: cargo test --verbose --color always
- - name: Test on Rust ${{ matrix.toolchain }} with net-tokio, and full code-linking for coverage generation
- if: matrix.coverage
- run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always
- - name: Test no-std builds on Rust ${{ matrix.toolchain }}
- if: "matrix.build-no-std && !matrix.coverage"
- shell: bash # Default on Winblows is powershell
- run: |
- for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
- cd $DIR
- cargo test --verbose --color always --no-default-features --features no-std
- # check if there is a conflict between no-std and the default std feature
- cargo test --verbose --color always --features no-std
- # check that things still pass without grind_signatures
- # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
- cargo test --verbose --color always --no-default-features --features std
- # check if there is a conflict between no-std and the c_bindings cfg
- RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
- cd ..
- done
- # check no-std compatibility across dependencies
- cd no-std-check
- cargo check --verbose --color always --features lightning-transaction-sync
- - name: Build no-std-check on Rust ${{ matrix.toolchain }} for ARM Embedded
- if: "matrix.build-no-std && matrix.platform == 'ubuntu-latest'"
- run: |
- cd no-std-check
rustup target add thumbv7m-none-eabi
sudo apt-get -y install gcc-arm-none-eabi
- cargo build --target=thumbv7m-none-eabi
- - name: Test on no-std builds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
- if: "matrix.build-no-std && matrix.coverage"
+ - name: shellcheck the CI script
+ if: "matrix.platform == 'ubuntu-latest'"
run: |
- cd lightning
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no-std
- - name: Test futures builds on Rust ${{ matrix.toolchain }}
- if: "matrix.build-futures && !matrix.coverage"
+ sudo apt-get -y install shellcheck
+ shellcheck ci/ci-tests.sh
+ - name: Run CI script with coverage generation
+ if: matrix.coverage
shell: bash # Default on Winblows is powershell
- run: |
- cd lightning-background-processor
- cargo test --verbose --color always --no-default-features --features futures
- - name: Test futures builds on Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
- if: "matrix.build-futures && matrix.coverage"
+ run: LDK_COVERAGE_BUILD=true ./ci/ci-tests.sh
+ - name: Run CI script
+ if: "!matrix.coverage"
shell: bash # Default on Winblows is powershell
- run: |
- cd lightning-background-processor
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features futures
- - name: Test on Rust ${{ matrix.toolchain }}
- if: "! matrix.build-net-tokio"
- run: |
- cargo test --verbose --color always -p lightning
- cargo test --verbose --color always -p lightning-invoice
- cargo test --verbose --color always -p lightning-rapid-gossip-sync
- cargo test --verbose --color always -p lightning-persister
- cargo test --verbose --color always -p lightning-background-processor
- - name: Test C Bindings Modifications on Rust ${{ matrix.toolchain }}
- if: "! matrix.build-net-tokio"
- run: |
- RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always -p lightning
- RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always -p lightning-invoice
- RUSTFLAGS="--cfg=c_bindings" cargo build --verbose --color always -p lightning-persister
- RUSTFLAGS="--cfg=c_bindings" cargo build --verbose --color always -p lightning-background-processor
- - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features
- if: "matrix.build-net-tokio && !matrix.coverage"
- run: |
- cd lightning-block-sync
- cargo test --verbose --color always --features rest-client
- cargo test --verbose --color always --features rpc-client
- cargo test --verbose --color always --features rpc-client,rest-client
- cargo test --verbose --color always --features rpc-client,rest-client,tokio
- - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
- if: matrix.coverage
- run: |
- cd lightning-block-sync
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rest-client
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client,rest-client
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client,rest-client,tokio
- - name: Test Custom Message Macros on Rust ${{ matrix.toolchain }}
- if: "matrix.test-custom-message"
- run: |
- cd lightning-custom-message
- cargo test --verbose --color always
+ run: ./ci/ci-tests.sh
- name: Install deps for kcov
if: matrix.coverage
run: |
cargo check --no-default-features --features=no-std --release
cargo check --no-default-features --features=futures --release
cargo doc --release
+ RUSTDOCFLAGS="--cfg=anchors" cargo doc --release
+ - name: Run cargo check for Taproot build.
+ run: |
+ cargo check --release
+ cargo check --no-default-features --features=no-std --release
+ cargo check --no-default-features --features=futures --release
+ cargo doc --release
+ env:
+ RUSTFLAGS: '--cfg=anchors --cfg=taproot'
+ RUSTDOCFLAGS: '--cfg=anchors --cfg=taproot'
fuzz:
runs-on: ubuntu-latest
--- /dev/null
+#!/bin/bash
+set -eox pipefail
+
+# Minor version of the active rustc (e.g. 48 for 1.48.x) and the host target
+# triple, used below to gate version- and platform-specific steps.
+RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }')
+HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')"
+
+# Tokio's MSRV for releases 1.17 through 1.26 is rustc 1.49; for releases after
+# 1.26 it is rustc 1.56. Pin tokio accordingly on older toolchains.
+[ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.0" --verbose
+[[ "$RUSTC_MINOR_VERSION" -gt 48 && "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.26.0" --verbose
+# Coverage builds link dead code so the coverage tool can attribute uncalled code.
+[ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
+
+export RUST_BACKTRACE=1
+
+echo -e "\n\nBuilding and testing all workspace crates..."
+cargo build --verbose --color always
+cargo test --verbose --color always
+
+echo -e "\n\nBuilding with all Log-Limiting features"
+pushd lightning
+# Build once per max_level_* feature listed in lightning/Cargo.toml to make
+# sure each log-limiting configuration still compiles.
+grep '^max_level_' Cargo.toml | awk '{ print $1 }'| while read -r FEATURE; do
+ cargo build --verbose --color always --features "$FEATURE"
+done
+popd
+
+if [ "$RUSTC_MINOR_VERSION" -gt 51 ]; then # Current `object` MSRV, subject to change
+ echo -e "\n\nTest backtrace-debug builds"
+ pushd lightning
+ cargo test --verbose --color always --features backtrace
+ popd
+fi
+
+echo -e "\n\nTesting no-std flags in various combinations"
+for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
+ pushd $DIR
+ cargo test --verbose --color always --no-default-features --features no-std
+ # check if there is a conflict between no-std and the default std feature
+ cargo test --verbose --color always --features no-std
+ # check that things still pass without grind_signatures
+ # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
+ cargo test --verbose --color always --no-default-features --features std
+ # check if there is a conflict between no-std and the c_bindings cfg
+ RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
+ popd
+done
+
+echo -e "\n\nTesting no-std build on a downstream no-std crate"
+# check no-std compatibility across dependencies
+pushd no-std-check
+cargo check --verbose --color always --features lightning-transaction-sync
+popd
+
+# The ARM embedded cross-compiler is only installed by the ubuntu-latest
+# workflow step, so this build is skipped wherever the tool is absent.
+if [ -f "$(which arm-none-eabi-gcc)" ]; then
+ pushd no-std-check
+ cargo build --target=thumbv7m-none-eabi
+ popd
+fi
+
+echo -e "\n\nBuilding and testing Block Sync Clients with features"
+pushd lightning-block-sync
+cargo build --verbose --color always --features rest-client
+cargo test --verbose --color always --features rest-client
+cargo build --verbose --color always --features rpc-client
+cargo test --verbose --color always --features rpc-client
+cargo build --verbose --color always --features rpc-client,rest-client
+cargo test --verbose --color always --features rpc-client,rest-client
+cargo build --verbose --color always --features rpc-client,rest-client,tokio
+cargo test --verbose --color always --features rpc-client,rest-client,tokio
+popd
+
+# lightning-transaction-sync is only built on rustc > 1.67 and on non-Windows hosts.
+if [[ $RUSTC_MINOR_VERSION -gt 67 && "$HOST_PLATFORM" != *windows* ]]; then
+ echo -e "\n\nBuilding and testing Transaction Sync Clients with features"
+ pushd lightning-transaction-sync
+ cargo build --verbose --color always --features esplora-blocking
+ cargo test --verbose --color always --features esplora-blocking
+ cargo build --verbose --color always --features esplora-async
+ cargo test --verbose --color always --features esplora-async
+ cargo build --verbose --color always --features esplora-async-https
+ cargo test --verbose --color always --features esplora-async-https
+ popd
+fi
+
+echo -e "\n\nTest futures builds"
+pushd lightning-background-processor
+cargo test --verbose --color always --no-default-features --features futures
+popd
+
+# lightning-custom-message is only tested on rustc > 1.55.
+if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then
+ echo -e "\n\nTest Custom Message Macros"
+ pushd lightning-custom-message
+ cargo test --verbose --color always
+ popd
+fi
+
+echo -e "\n\nTest anchors builds"
+pushd lightning
+# Append to any RUSTFLAGS set above (e.g. -C link-dead-code for coverage builds).
+RUSTFLAGS="$RUSTFLAGS --cfg=anchors" cargo test --verbose --color always -p lightning
+echo -e "\n\nTest Taproot builds"
+RUSTFLAGS="$RUSTFLAGS --cfg=anchors --cfg=taproot" cargo test --verbose --color always -p lightning
+popd
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::keysinterface::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
+use lightning::events;
+use lightning::events::MessageSendEventsProvider;
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::script::ShutdownScript;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::errors::APIError;
-use lightning::util::events;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
-use lightning::util::events::MessageSendEventsProvider;
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use lightning::routing::router::{InFlightHtlcs, Route, RouteHop, RouteParameters, Router};
msg.clone()
} else { panic!("Wrong event type"); }
};
+ let events = $dest.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let events::Event::ChannelPending{ ref counterparty_node_id, .. } = events[0] {
+ assert_eq!(counterparty_node_id, &$source.get_our_node_id());
+ } else { panic!("Wrong event type"); }
+
$source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);
+ let events = $source.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let events::Event::ChannelPending{ ref counterparty_node_id, .. } = events[0] {
+ assert_eq!(counterparty_node_id, &$dest.get_our_node_id());
+ } else { panic!("Wrong event type"); }
funding_output
} }
use lightning::chain::chainmonitor;
use lightning::chain::transaction::OutPoint;
use lightning::chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
+use lightning::events::Event;
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentId};
use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler};
use lightning::routing::scoring::FixedPenaltyScorer;
use lightning::util::config::UserConfig;
use lightning::util::errors::APIError;
-use lightning::util::events::Event;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::logger::Logger;
use lightning::util::ser::{Readable, Writeable};
// 0085 3d00000000000000000000000000000000000000000000000000000000000000 0900000000000000000000000000000000000000000000000000000000000000 020b00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
//
// 07 - process the now-pending HTLC forward
- // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: SendHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
+ // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
//
// - we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
// 030112 - inbound read from peer id 1 of len 18
inbound_htlc_minimum_msat: None,
inbound_htlc_maximum_msat: None,
config: None,
+ feerate_sat_per_1000_weight: None,
});
}
Some(&first_hops_vec[..])
rustdoc-args = ["--cfg", "docsrs"]
[features]
-futures = [ "futures-util" ]
+futures = [ ]
std = ["lightning/std", "lightning-rapid-gossip-sync/std"]
default = ["std"]
bitcoin = { version = "0.29.0", default-features = false }
lightning = { version = "0.0.114", path = "../lightning", default-features = false }
lightning-rapid-gossip-sync = { version = "0.0.114", path = "../lightning-rapid-gossip-sync", default-features = false }
-futures-util = { version = "0.3", default-features = false, features = ["async-await-macro"], optional = true }
[dev-dependencies]
+tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
lightning-invoice = { version = "0.22.0", path = "../lightning-invoice" }
lightning-persister = { version = "0.0.114", path = "../lightning-persister" }
#![deny(private_intra_doc_links)]
#![deny(missing_docs)]
-#![deny(unsafe_code)]
+#![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
+use lightning::events::{Event, PathFailure};
+#[cfg(feature = "std")]
+use lightning::events::{EventHandler, EventsProvider};
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::Router;
use lightning::routing::scoring::{Score, WriteableScore};
-use lightning::util::events::{Event, PathFailure};
-#[cfg(feature = "std")]
-use lightning::util::events::{EventHandler, EventsProvider};
use lightning::util::logger::Logger;
use lightning::util::persist::Persister;
+#[cfg(feature = "std")]
+use lightning::util::wakers::Sleeper;
use lightning_rapid_gossip_sync::RapidGossipSync;
use core::ops::Deref;
#[cfg(feature = "std")]
use std::time::Instant;
-#[cfg(feature = "futures")]
-use futures_util::{select_biased, future::FutureExt, task};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// unilateral chain closure fees are at risk.
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
-/// [`Event`]: lightning::util::events::Event
+/// [`Event`]: lightning::events::Event
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
+#[cfg(feature = "futures")]
+/// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
+const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
+#[cfg(feature = "futures")]
+const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
+ min_u64(SCORER_PERSIST_TIMER, FIRST_NETWORK_PRUNE_TIMER));
+
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
pub enum GossipSync<
P: Deref<Target = P2PGossipSync<G, U, L>>,
}
}
-/// (C-not exported) as the bindings concretize everything and have constructors for us
+/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
where
}
}
-/// (C-not exported) as the bindings concretize everything and have constructors for us
+/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
GossipSync<
&P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
}
}
-/// (C-not exported) as the bindings concretize everything and have constructors for us
+/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, L: Deref>
GossipSync<
&P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
$channel_manager: ident, $process_channel_manager_events: expr,
$gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
- $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr)
+ $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
+ $check_slow_await: expr)
=> { {
log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
$channel_manager.timer_tick_occurred();
// We wait up to 100ms, but track how long it takes to detect being put to sleep,
// see `await_start`'s use below.
- let mut await_start = $get_timer(1);
+ let mut await_start = None;
+ if $check_slow_await { await_start = Some($get_timer(1)); }
let updates_available = $await;
- let await_slow = $timer_elapsed(&mut await_start, 1);
+ let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
if updates_available {
log_trace!($logger, "Persisting ChannelManager...");
} }
}
+#[cfg(feature = "futures")]
+pub(crate) mod futures_util {
+ use core::future::Future;
+ use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
+ use core::pin::Pin;
+ use core::marker::Unpin;
+ pub(crate) struct Selector<
+ A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
+ > {
+ pub a: A,
+ pub b: B,
+ pub c: C,
+ }
+ pub(crate) enum SelectorOutput {
+ A, B, C(bool),
+ }
+
+ impl<
+ A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
+ > Future for Selector<A, B, C> {
+ type Output = SelectorOutput;
+ fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
+ match Pin::new(&mut self.a).poll(ctx) {
+ Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
+ Poll::Pending => {},
+ }
+ match Pin::new(&mut self.b).poll(ctx) {
+ Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
+ Poll::Pending => {},
+ }
+ match Pin::new(&mut self.c).poll(ctx) {
+ Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
+ Poll::Pending => {},
+ }
+ Poll::Pending
+ }
+ }
+
+ // To poll a future from outside an async context (just to check whether it has completed,
+ // without awaiting it) we need a Waker, which needs a vtable. We fill the vtable with no-op
+ // functions, but sadly there's a good bit of boilerplate here.
+ fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
+ fn dummy_waker_action(_: *const ()) { }
+
+ const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
+ dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
+ pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
+}
+#[cfg(feature = "futures")]
+use futures_util::{Selector, SelectorOutput, dummy_waker};
+#[cfg(feature = "futures")]
+use core::task;
+
/// Processes background events in a future.
///
/// `sleeper` should return a future which completes in the given amount of time and returns a
/// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
/// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
/// manually instead.
+///
+/// The `mobile_interruptable_platform` flag should be set if we're currently running on a
+/// mobile device, where we may need to check for interruption of the application regularly. If you
+/// are unsure, you should set the flag, as the performance impact of it is minimal unless there
+/// are hundreds or thousands of simultaneous process calls running.
#[cfg(feature = "futures")]
pub async fn process_events_async<
'a,
>(
persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
- sleeper: Sleeper,
+ sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
UL::Target: 'static + UtxoLookup,
UMH::Target: 'static + CustomMessageHandler,
PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
{
- let mut should_break = true;
+ let mut should_break = false;
let async_event_handler = |event| {
let network_graph = gossip_sync.network_graph();
let event_handler = &event_handler;
chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
gossip_sync, peer_manager, logger, scorer, should_break, {
- select_biased! {
- _ = channel_manager.get_persistable_update_future().fuse() => true,
- exit = sleeper(Duration::from_millis(100)).fuse() => {
+ let fut = Selector {
+ a: channel_manager.get_persistable_update_future(),
+ b: chain_monitor.get_update_future(),
+ c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
+ };
+ match fut.await {
+ SelectorOutput::A => true,
+ SelectorOutput::B => false,
+ SelectorOutput::C(exit) => {
should_break = exit;
false
}
}
}, |t| sleeper(Duration::from_secs(t)),
|fut: &mut SleepFuture, _| {
- let mut waker = task::noop_waker();
+ let mut waker = dummy_waker();
let mut ctx = task::Context::from_waker(&mut waker);
- core::pin::Pin::new(fut).poll(&mut ctx).is_ready()
- })
+ match core::pin::Pin::new(fut).poll(&mut ctx) {
+ task::Poll::Ready(exit) => { should_break = exit; true },
+ task::Poll::Pending => false,
+ }
+ }, mobile_interruptable_platform)
}
#[cfg(feature = "std")]
define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
channel_manager, channel_manager.process_pending_events(&event_handler),
gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
- channel_manager.await_persistable_update_timeout(Duration::from_millis(100)),
- |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur)
+ Sleeper::from_two_futures(
+ channel_manager.get_persistable_update_future(),
+ chain_monitor.get_update_future()
+ ).wait_timeout(Duration::from_millis(100)),
+ |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
});
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
}
use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
use lightning::chain::transaction::OutPoint;
- use lightning::get_event_msg;
+ use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
+ use lightning::{get_event_msg, get_event};
use lightning::ln::PaymentHash;
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
use lightning::routing::router::{DefaultRouter, RouteHop};
use lightning::routing::scoring::{ChannelUsage, Score};
use lightning::util::config::UserConfig;
- use lightning::util::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
use lightning::util::ser::Writeable;
use lightning::util::test_utils;
use lightning::util::persist::KVStorePersister;
($node_a: expr, $node_b: expr, $temporary_channel_id: expr, $tx: expr) => {{
$node_a.node.funding_transaction_generated(&$temporary_channel_id, &$node_b.node.get_our_node_id(), $tx.clone()).unwrap();
$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
+ get_event!($node_b, Event::ChannelPending);
+
$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
+ get_event!($node_a, Event::ChannelPending);
}}
}
let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
check_persisted_data!(nodes[0].scorer, filepath.clone());
- assert!(bg_processor.stop().is_ok());
+ if !std::thread::panicking() {
+ bg_processor.stop().unwrap();
+ }
}
#[test]
}
}
- assert!(bg_processor.stop().is_ok());
+ if !std::thread::panicking() {
+ bg_processor.stop().unwrap();
+ }
}
#[test]
}
}
+ #[tokio::test]
+ #[cfg(feature = "futures")]
+ async fn test_channel_manager_persist_error_async() {
+ // Test that if we encounter an error during manager persistence, the error is
+ // surfaced as the `Err` result of the background-processor future (checked below;
+ // unlike the threaded variant, nothing panics here).
+ // NOTE(review): the data dir is named "test_persist_error_sync" — confirm this does
+ // not collide with the sync variant's on-disk directory.
+ let nodes = create_nodes(2, "test_persist_error_sync".to_string());
+ open_channel!(nodes[0], nodes[1], 100000);
+
+ let data_dir = nodes[0].persister.get_data_dir();
+ // Inject a manager-persistence failure: every ChannelManager persist attempt
+ // returns io::ErrorKind::Other with the message "test".
+ let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
+
+ let bp_future = super::process_events_async(
+ persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
+ nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
+ Some(nodes[0].scorer.clone()), move |dur: Duration| {
+ Box::pin(async move {
+ tokio::time::sleep(dur).await;
+ false // Never exit
+ })
+ }, false,
+ );
+ // The injected persistence failure should abort the future with that exact error.
+ match bp_future.await {
+ Ok(_) => panic!("Expected error persisting manager"),
+ Err(e) => {
+ assert_eq!(e.kind(), std::io::ErrorKind::Other);
+ assert_eq!(e.get_ref().unwrap().to_string(), "test");
+ },
+ }
+ }
+
#[test]
fn test_network_graph_persist_error() {
// Test that if we encounter an error during network graph persistence, an error gets returned.
nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
- assert!(bg_processor.stop().is_ok());
+ if !std::thread::panicking() {
+ bg_processor.stop().unwrap();
+ }
// Set up a background event handler for SpendableOutputs events.
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
_ => panic!("Unexpected event: {:?}", event),
}
- assert!(bg_processor.stop().is_ok());
+ if !std::thread::panicking() {
+ bg_processor.stop().unwrap();
+ }
}
#[test]
}
}
- assert!(bg_processor.stop().is_ok());
+ if !std::thread::panicking() {
+ bg_processor.stop().unwrap();
+ }
+ }
+
+ macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
+ ($nodes: expr, $receive: expr, $sleep: expr) => {
+ let features = ChannelFeatures::empty();
+ $nodes[0].network_graph.add_channel_from_partial_announcement(
+ 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
+ ).expect("Failed to update channel from partial announcement");
+ let original_graph_description = $nodes[0].network_graph.to_string();
+ assert!(original_graph_description.contains("42: features: 0000, node_one:"));
+ assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
+
+ loop {
+ $sleep;
+ let log_entries = $nodes[0].logger.lines.lock().unwrap();
+ let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
+ if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+ .unwrap_or(&0) > 1
+ {
+ // Wait until the loop has gone around at least twice.
+ break
+ }
+ }
+
+ let initialization_input = vec![
+ 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+ 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+ 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+ 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+ 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+ 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+ 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+ 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+ 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+ 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+ 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+ 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+ 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+ ];
+ $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
+
+ // this should have added two channels
+ assert_eq!($nodes[0].network_graph.read_only().channels().len(), 3);
+
+ $receive.expect("Network graph not pruned within deadline");
+
+ // all channels should now be pruned
+ assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
+ }
}
#[test]
fn test_not_pruning_network_graph_until_graph_sync_completion() {
+ let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
let data_dir = nodes[0].persister.get_data_dir();
- let (sender, receiver) = std::sync::mpsc::sync_channel(1);
let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
- let network_graph = nodes[0].network_graph.clone();
- let features = ChannelFeatures::empty();
- network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
- .expect("Failed to update channel from partial announcement");
- let original_graph_description = network_graph.to_string();
- assert!(original_graph_description.contains("42: features: 0000, node_one:"));
- assert_eq!(network_graph.read_only().channels().len(), 1);
let event_handler = |_: _| {};
let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
- loop {
- let log_entries = nodes[0].logger.lines.lock().unwrap();
- let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
- if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
- .unwrap_or(&0) > 1
- {
- // Wait until the loop has gone around at least twice.
- break
+ do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
+ receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
+ std::thread::sleep(Duration::from_millis(1)));
+
+ background_processor.stop().unwrap();
+ }
+
+ #[tokio::test]
+ #[cfg(feature = "futures")]
+ async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
+ // Async variant: run the background-processor future on a LocalSet alongside a
+ // task that drives the shared test macro body and then signals exit.
+ let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
+ let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
+ let data_dir = nodes[0].persister.get_data_dir();
+ // The persister notifies `sender` when the network graph is persisted (i.e. pruned).
+ let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
+
+ let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
+ let bp_future = super::process_events_async(
+ persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
+ nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
+ Some(nodes[0].scorer.clone()), move |dur: Duration| {
+ let mut exit_receiver = exit_receiver.clone();
+ Box::pin(async move {
+ tokio::select! {
+ _ = tokio::time::sleep(dur) => false,
+ _ = exit_receiver.changed() => true,
+ }
+ })
+ }, false,
+ );
+ // TODO: Drop the LocalSet (`local_set`) and simply spawn after #2003
+ let local_set = tokio::task::LocalSet::new();
+ local_set.spawn_local(bp_future);
+ local_set.spawn_local(async move {
+ do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
+ // Poll for the prune notification, sleeping between attempts; give up
+ // (and fail the assert) after five prune-timer periods.
+ let mut i = 0;
+ loop {
+ tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
+ if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
+ assert!(i < 5);
+ i += 1;
+ }
+ }, tokio::time::sleep(Duration::from_millis(1)).await);
+ exit_sender.send(()).unwrap();
+ });
+ local_set.await;
+ }
+
+ macro_rules! do_test_payment_path_scoring {
+ ($nodes: expr, $receive: expr) => {
+ // Ensure that we update the scorer when relevant events are processed. In this case, we ensure
+ // that we update the scorer upon a payment path succeeding (note that the channel must be
+ // public or else we won't score it).
+ // A background event handler that forwards payment-path and probe events must be
+ // hooked up to a running background processor.
+ let scored_scid = 4242;
+ let secp_ctx = Secp256k1::new();
+ let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
+ let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
+
+ let path = vec![RouteHop {
+ pubkey: node_1_id,
+ node_features: NodeFeatures::empty(),
+ short_channel_id: scored_scid,
+ channel_features: ChannelFeatures::empty(),
+ fee_msat: 0,
+ cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
+ }];
+
+ $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
+ $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
+ payment_id: None,
+ payment_hash: PaymentHash([42; 32]),
+ payment_failed_permanently: false,
+ failure: PathFailure::OnPath { network_update: None },
+ path: path.clone(),
+ short_channel_id: Some(scored_scid),
+ });
+ let event = $receive.expect("PaymentPathFailed not handled within deadline");
+ match event {
+ Event::PaymentPathFailed { .. } => {},
+ _ => panic!("Unexpected event"),
}
- }
- let initialization_input = vec![
- 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
- 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
- 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
- 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
- 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
- 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
- 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
- 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
- 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
- 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
- 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
- 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
- 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
- ];
- nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
-
- // this should have added two channels
- assert_eq!(network_graph.read_only().channels().len(), 3);
-
- receiver
- .recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
- .expect("Network graph not pruned within deadline");
+ // Ensure we'll score payments that were explicitly failed back by the destination as
+ // ProbeSuccess.
+ $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
+ $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
+ payment_id: None,
+ payment_hash: PaymentHash([42; 32]),
+ payment_failed_permanently: true,
+ failure: PathFailure::OnPath { network_update: None },
+ path: path.clone(),
+ short_channel_id: None,
+ });
+ let event = $receive.expect("PaymentPathFailed not handled within deadline");
+ match event {
+ Event::PaymentPathFailed { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
- background_processor.stop().unwrap();
+ $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
+ $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
+ payment_id: PaymentId([42; 32]),
+ payment_hash: None,
+ path: path.clone(),
+ });
+ let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
+ match event {
+ Event::PaymentPathSuccessful { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
+ $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
+ payment_id: PaymentId([42; 32]),
+ payment_hash: PaymentHash([42; 32]),
+ path: path.clone(),
+ });
+ let event = $receive.expect("ProbeSuccessful not handled within deadline");
+ match event {
+ Event::ProbeSuccessful { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
- // all channels should now be pruned
- assert_eq!(network_graph.read_only().channels().len(), 0);
+ $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
+ $nodes[0].node.push_pending_event(Event::ProbeFailed {
+ payment_id: PaymentId([42; 32]),
+ payment_hash: PaymentHash([42; 32]),
+ path,
+ short_channel_id: Some(scored_scid),
+ });
+ let event = $receive.expect("ProbeFailure not handled within deadline");
+ match event {
+ Event::ProbeFailed { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ }
}
#[test]
fn test_payment_path_scoring() {
- // Ensure that we update the scorer when relevant events are processed. In this case, we ensure
- // that we update the scorer upon a payment path succeeding (note that the channel must be
- // public or else we won't score it).
- // Set up a background event handler for FundingGenerationReady events.
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
let event_handler = move |event: Event| match event {
Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
let persister = Arc::new(Persister::new(data_dir));
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
- let scored_scid = 4242;
- let secp_ctx = Secp256k1::new();
- let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
- let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
-
- let path = vec![RouteHop {
- pubkey: node_1_id,
- node_features: NodeFeatures::empty(),
- short_channel_id: scored_scid,
- channel_features: ChannelFeatures::empty(),
- fee_msat: 0,
- cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
- }];
-
- nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
- nodes[0].node.push_pending_event(Event::PaymentPathFailed {
- payment_id: None,
- payment_hash: PaymentHash([42; 32]),
- payment_failed_permanently: false,
- failure: PathFailure::OnPath { network_update: None },
- path: path.clone(),
- short_channel_id: Some(scored_scid),
- retry: None,
- });
- let event = receiver
- .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
- .expect("PaymentPathFailed not handled within deadline");
- match event {
- Event::PaymentPathFailed { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
- // Ensure we'll score payments that were explicitly failed back by the destination as
- // ProbeSuccess.
- nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
- nodes[0].node.push_pending_event(Event::PaymentPathFailed {
- payment_id: None,
- payment_hash: PaymentHash([42; 32]),
- payment_failed_permanently: true,
- failure: PathFailure::OnPath { network_update: None },
- path: path.clone(),
- short_channel_id: None,
- retry: None,
- });
- let event = receiver
- .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
- .expect("PaymentPathFailed not handled within deadline");
- match event {
- Event::PaymentPathFailed { .. } => {},
- _ => panic!("Unexpected event"),
+ if !std::thread::panicking() {
+ bg_processor.stop().unwrap();
}
+ }
- nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
- nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
- payment_id: PaymentId([42; 32]),
- payment_hash: None,
- path: path.clone(),
- });
- let event = receiver
- .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
- .expect("PaymentPathSuccessful not handled within deadline");
- match event {
- Event::PaymentPathSuccessful { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ #[tokio::test]
+ #[cfg(feature = "futures")]
+ async fn test_payment_path_scoring_async() {
+ let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
+ let event_handler = move |event: Event| {
+ let sender_ref = sender.clone();
+ async move {
+ match event {
+ Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
+ Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
+ Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
+ Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
+ _ => panic!("Unexpected event: {:?}", event),
+ }
+ }
+ };
- nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
- nodes[0].node.push_pending_event(Event::ProbeSuccessful {
- payment_id: PaymentId([42; 32]),
- payment_hash: PaymentHash([42; 32]),
- path: path.clone(),
- });
- let event = receiver
- .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
- .expect("ProbeSuccessful not handled within deadline");
- match event {
- Event::ProbeSuccessful { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ let nodes = create_nodes(1, "test_payment_path_scoring_async".to_string());
+ let data_dir = nodes[0].persister.get_data_dir();
+ let persister = Arc::new(Persister::new(data_dir));
- nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
- nodes[0].node.push_pending_event(Event::ProbeFailed {
- payment_id: PaymentId([42; 32]),
- payment_hash: PaymentHash([42; 32]),
- path,
- short_channel_id: Some(scored_scid),
+ let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
+
+ let bp_future = super::process_events_async(
+ persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
+ nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
+ Some(nodes[0].scorer.clone()), move |dur: Duration| {
+ let mut exit_receiver = exit_receiver.clone();
+ Box::pin(async move {
+ tokio::select! {
+ _ = tokio::time::sleep(dur) => false,
+ _ = exit_receiver.changed() => true,
+ }
+ })
+ }, false,
+ );
+ // TODO: Drop the LocalSet (`local_set`) and simply spawn after #2003
+ let local_set = tokio::task::LocalSet::new();
+ local_set.spawn_local(bp_future);
+ local_set.spawn_local(async move {
+ do_test_payment_path_scoring!(nodes, receiver.recv().await);
+ exit_sender.send(()).unwrap();
});
- let event = receiver
- .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
- .expect("ProbeFailure not handled within deadline");
- match event {
- Event::ProbeFailed { .. } => {},
- _ => panic!("Unexpected event"),
- }
-
- assert!(bg_processor.stop().is_ok());
+ local_set.await;
}
}
rustdoc-args = ["--cfg", "docsrs"]
[features]
-rest-client = [ "serde", "serde_json", "chunked_transfer" ]
-rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
+rest-client = [ "serde_json", "chunked_transfer" ]
+rpc-client = [ "serde_json", "chunked_transfer" ]
[dependencies]
bitcoin = "0.29.0"
lightning = { version = "0.0.114", path = "../lightning" }
-futures-util = { version = "0.3" }
tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
-serde = { version = "1.0", features = ["derive"], optional = true }
serde_json = { version = "1.0", optional = true }
chunked_transfer = { version = "1.4", optional = true }
[dev-dependencies]
lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
-tokio = { version = "~1.14", features = [ "macros", "rt" ] }
+tokio = { version = "1.14", features = [ "macros", "rt" ] }
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::consensus::encode;
use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid};
-use bitcoin::hashes::hex::{FromHex, ToHex};
+use bitcoin::hashes::hex::FromHex;
use bitcoin::Transaction;
-use serde::Deserialize;
-
use serde_json;
use std::convert::From;
type Error = std::io::Error;
fn try_into(self) -> std::io::Result<BlockHeaderData> {
- let mut header = match self.0 {
+ let header = match self.0 {
serde_json::Value::Array(mut array) if !array.is_empty() => array.drain(..).next().unwrap(),
serde_json::Value::Object(_) => self.0,
_ => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "unexpected JSON type")),
}
// Add an empty previousblockhash for the genesis block.
- if let None = header.get("previousblockhash") {
- let hash: BlockHash = BlockHash::all_zeros();
- header.as_object_mut().unwrap().insert("previousblockhash".to_string(), serde_json::json!(hash.to_hex()));
- }
-
- match serde_json::from_value::<GetHeaderResponse>(header) {
- Err(_) => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid header response")),
- Ok(response) => match response.try_into() {
- Err(_) => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid header data")),
- Ok(header) => Ok(header),
- },
+ match header.try_into() {
+ Err(_) => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid header data")),
+ Ok(header) => Ok(header),
}
}
}
-/// Response data from `getblockheader` RPC and `headers` REST requests.
-#[derive(Deserialize)]
-struct GetHeaderResponse {
- pub version: i32,
- pub merkleroot: String,
- pub time: u32,
- pub nonce: u32,
- pub bits: String,
- pub previousblockhash: String,
-
- pub chainwork: String,
- pub height: u32,
-}
+impl TryFrom<serde_json::Value> for BlockHeaderData {
+ type Error = ();
-/// Converts from `GetHeaderResponse` to `BlockHeaderData`.
-impl TryFrom<GetHeaderResponse> for BlockHeaderData {
- type Error = bitcoin::hashes::hex::Error;
+ fn try_from(response: serde_json::Value) -> Result<Self, ()> {
+ macro_rules! get_field { ($name: expr, $ty_access: tt) => {
+ response.get($name).ok_or(())?.$ty_access().ok_or(())?
+ } }
- fn try_from(response: GetHeaderResponse) -> Result<Self, bitcoin::hashes::hex::Error> {
Ok(BlockHeaderData {
header: BlockHeader {
- version: response.version,
- prev_blockhash: BlockHash::from_hex(&response.previousblockhash)?,
- merkle_root: TxMerkleNode::from_hex(&response.merkleroot)?,
- time: response.time,
- bits: u32::from_be_bytes(<[u8; 4]>::from_hex(&response.bits)?),
- nonce: response.nonce,
+ version: get_field!("version", as_i64).try_into().map_err(|_| ())?,
+ prev_blockhash: if let Some(hash_str) = response.get("previousblockhash") {
+ BlockHash::from_hex(hash_str.as_str().ok_or(())?).map_err(|_| ())?
+ } else { BlockHash::all_zeros() },
+ merkle_root: TxMerkleNode::from_hex(get_field!("merkleroot", as_str)).map_err(|_| ())?,
+ time: get_field!("time", as_u64).try_into().map_err(|_| ())?,
+ bits: u32::from_be_bytes(<[u8; 4]>::from_hex(get_field!("bits", as_str)).map_err(|_| ())?),
+ nonce: get_field!("nonce", as_u64).try_into().map_err(|_| ())?,
},
- chainwork: hex_to_uint256(&response.chainwork)?,
- height: response.height,
+ chainwork: hex_to_uint256(get_field!("chainwork", as_str)).map_err(|_| ())?,
+ height: get_field!("height", as_u64).try_into().map_err(|_| ())?,
})
}
}
use super::*;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hashes::Hash;
+ use bitcoin::hashes::hex::ToHex;
use bitcoin::network::constants::Network;
use serde_json::value::Number;
use serde_json::Value;
match TryInto::<BlockHeaderData>::try_into(response) {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
- assert_eq!(e.get_ref().unwrap().to_string(), "invalid header response");
+ assert_eq!(e.get_ref().unwrap().to_string(), "invalid header data");
},
Ok(_) => panic!("Expected error"),
}
use bitcoin::hash_types::BlockHash;
use bitcoin::hashes::hex::ToHex;
-use futures_util::lock::Mutex;
-
use std::convert::TryFrom;
use std::convert::TryInto;
+use std::sync::Mutex;
/// A simple REST client for requesting resources using HTTP `GET`.
pub struct RestClient {
endpoint: HttpEndpoint,
- client: Mutex<HttpClient>,
+ client: Mutex<Option<HttpClient>>,
}
impl RestClient {
///
/// The endpoint should contain the REST path component (e.g., http://127.0.0.1:8332/rest).
pub fn new(endpoint: HttpEndpoint) -> std::io::Result<Self> {
- let client = Mutex::new(HttpClient::connect(&endpoint)?);
- Ok(Self { endpoint, client })
+ Ok(Self { endpoint, client: Mutex::new(None) })
}
/// Requests a resource encoded in `F` format and interpreted as type `T`.
where F: TryFrom<Vec<u8>, Error = std::io::Error> + TryInto<T, Error = std::io::Error> {
let host = format!("{}:{}", self.endpoint.host(), self.endpoint.port());
let uri = format!("{}/{}", self.endpoint.path().trim_end_matches("/"), resource_path);
- self.client.lock().await.get::<F>(&uri, &host).await?.try_into()
+ let mut client = if let Some(client) = self.client.lock().unwrap().take() { client }
+ else { HttpClient::connect(&self.endpoint)? };
+ let res = client.get::<F>(&uri, &host).await?.try_into();
+ *self.client.lock().unwrap() = Some(client);
+ res
}
}
use bitcoin::hash_types::BlockHash;
use bitcoin::hashes::hex::ToHex;
-use futures_util::lock::Mutex;
+use std::sync::Mutex;
use serde_json;
pub struct RpcClient {
basic_auth: String,
endpoint: HttpEndpoint,
- client: Mutex<HttpClient>,
+ client: Mutex<Option<HttpClient>>,
id: AtomicUsize,
}
/// credentials should be a base64 encoding of a user name and password joined by a colon, as is
/// required for HTTP basic access authentication.
pub fn new(credentials: &str, endpoint: HttpEndpoint) -> std::io::Result<Self> {
- let client = Mutex::new(HttpClient::connect(&endpoint)?);
Ok(Self {
basic_auth: "Basic ".to_string() + credentials,
endpoint,
- client,
+ client: Mutex::new(None),
id: AtomicUsize::new(0),
})
}
"id": &self.id.fetch_add(1, Ordering::AcqRel).to_string()
});
- let mut response = match self.client.lock().await.post::<JsonResponse>(&uri, &host, &self.basic_auth, content).await {
+ let mut client = if let Some(client) = self.client.lock().unwrap().take() { client }
+ else { HttpClient::connect(&self.endpoint)? };
+ let http_response = client.post::<JsonResponse>(&uri, &host, &self.basic_auth, content).await;
+ *self.client.lock().unwrap() = Some(client);
+
+ let mut response = match http_response {
Ok(JsonResponse(response)) => response,
Err(e) if e.kind() == std::io::ErrorKind::Other => {
match e.get_ref().unwrap().downcast_ref::<HttpError>() {
/// * `H`: exactly one `PaymentHash`
/// * `T`: the timestamp is set
///
-/// (C-not exported) as we likely need to manually select one set of boolean type parameters.
+/// This is not exported to bindings users as we likely need to manually select one set of boolean type parameters.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct InvoiceBuilder<D: tb::Bool, H: tb::Bool, T: tb::Bool, C: tb::Bool, S: tb::Bool> {
currency: Currency,
/// Represents the description of an invoice which has to be either a directly included string or
/// a hash of a description provided out of band.
///
-/// (C-not exported) As we don't have a good way to map the reference lifetimes making this
+/// This is not exported to bindings users as we don't have a good way to map the reference lifetimes making this
/// practically impossible to use safely in languages like C.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum InvoiceDescription<'f> {
/// Data of the `RawInvoice` that is encoded in the human readable part
///
-/// (C-not exported) As we don't yet support `Option<Enum>`
+/// This is not exported to bindings users as we don't yet support `Option<Enum>`
#[derive(Eq, PartialEq, Debug, Clone, Hash)]
pub struct RawHrp {
/// The currency deferred from the 3rd and 4th character of the bech32 transaction
/// Returns all enum variants of `SiPrefix` sorted in descending order of their associated
/// multiplier.
///
- /// (C-not exported) As we don't yet support a slice of enums, and also because this function
+ /// This is not exported to bindings users as we don't yet support a slice of enums, and also because this function
/// isn't the most critical to expose.
pub fn values_desc() -> &'static [SiPrefix] {
use crate::SiPrefix::*;
/// Tagged field which may have an unknown tag
///
-/// (C-not exported) as we don't currently support TaggedField
+/// This is not exported to bindings users as we don't currently support TaggedField
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub enum RawTaggedField {
/// Parsed tagged field with known tag
///
/// For descriptions of the enum values please refer to the enclosed type's docs.
///
-/// (C-not exported) As we don't yet support enum variants with the same name the struct contained
+/// This is not exported to bindings users as we don't yet support enum variants with the same name the struct contained
/// in the variant.
#[allow(missing_docs)]
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
/// SHA-256 hash
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
-pub struct Sha256(/// (C-not exported) as the native hash types are not currently mapped
+pub struct Sha256(/// This is not exported to bindings users as the native hash types are not currently mapped
pub sha256::Hash);
/// Description string
self.tagged_fields.push(TaggedField::DescriptionHash(Sha256(description_hash)));
self.set_flags()
}
+
+ /// Set the description or description hash. This function is only available if no description (hash) was set.
+ pub fn invoice_description(self, description: InvoiceDescription) -> InvoiceBuilder<tb::True, H, T, C, S> {
+ match description {
+ InvoiceDescription::Direct(desc) => {
+ self.description(desc.clone().into_inner())
+ }
+ InvoiceDescription::Hash(hash) => {
+ self.description_hash(hash.0)
+ }
+ }
+ }
}
impl<D: tb::Bool, T: tb::Bool, C: tb::Bool, S: tb::Bool> InvoiceBuilder<D, tb::False, T, C, S> {
/// of type `E`. Since the signature of a `SignedRawInvoice` is not required to be valid there
/// are no constraints regarding the validity of the produced signature.
///
- /// (C-not exported) As we don't currently support passing function pointers into methods
+ /// This is not exported to bindings users as we don't currently support passing function pointers into methods
/// explicitly.
pub fn sign<F, E>(self, sign_method: F) -> Result<SignedRawInvoice, E>
where F: FnOnce(&Message) -> Result<RecoverableSignature, E>
/// Returns an iterator over all tagged fields with known semantics.
///
- /// (C-not exported) As there is not yet a manual mapping for a FilterMap
+ /// This is not exported to bindings users as there is not yet a manual mapping for a FilterMap
pub fn known_tagged_fields(&self)
-> FilterMap<Iter<RawTaggedField>, fn(&RawTaggedField) -> Option<&TaggedField>>
{
find_extract!(self.known_tagged_fields(), TaggedField::Features(ref x), x)
}
- /// (C-not exported) as we don't support Vec<&NonOpaqueType>
+ /// This is not exported to bindings users as we don't support Vec<&NonOpaqueType>
pub fn fallbacks(&self) -> Vec<&Fallback> {
find_all_extract!(self.known_tagged_fields(), TaggedField::Fallback(ref x), x).collect()
}
/// Returns an iterator over all tagged fields of this Invoice.
///
- /// (C-not exported) As there is not yet a manual mapping for a FilterMap
+ /// This is not exported to bindings users as there is not yet a manual mapping for a FilterMap
pub fn tagged_fields(&self)
-> FilterMap<Iter<RawTaggedField>, fn(&RawTaggedField) -> Option<&TaggedField>> {
self.signed_invoice.raw_invoice().known_tagged_fields()
/// Return the description or a hash of it for longer ones
///
- /// (C-not exported) because we don't yet export InvoiceDescription
+ /// This is not exported to bindings users because we don't yet export InvoiceDescription
pub fn description(&self) -> InvoiceDescription {
if let Some(direct) = self.signed_invoice.description() {
return InvoiceDescription::Direct(direct);
self.signed_invoice.recover_payee_pub_key().expect("was checked by constructor").0
}
+ /// Returns the Duration since the Unix epoch at which the invoice expires.
+	/// Returns `None` if overflow occurred.
+ pub fn expires_at(&self) -> Option<Duration> {
+ self.duration_since_epoch().checked_add(self.expiry_time())
+ }
+
/// Returns the invoice's expiry time, if present, otherwise [`DEFAULT_EXPIRY_TIME`].
pub fn expiry_time(&self) -> Duration {
self.signed_invoice.expiry_time()
}
}
+ /// Returns the Duration remaining until the invoice expires.
+ #[cfg(feature = "std")]
+ pub fn duration_until_expiry(&self) -> Duration {
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
+ .map(|now| self.expiration_remaining_from_epoch(now))
+ .unwrap_or(Duration::from_nanos(0))
+ }
+
+ /// Returns the Duration remaining until the invoice expires given the current time.
+ /// `time` is the timestamp as a duration since the Unix epoch.
+ pub fn expiration_remaining_from_epoch(&self, time: Duration) -> Duration {
+ self.expires_at().map(|x| x.checked_sub(time)).flatten().unwrap_or(Duration::from_nanos(0))
+ }
+
/// Returns whether the expiry time would pass at the given point in time.
/// `at_time` is the timestamp as a duration since the Unix epoch.
pub fn would_expire(&self, at_time: Duration) -> bool {
/// Returns a list of all fallback addresses
///
- /// (C-not exported) as we don't support Vec<&NonOpaqueType>
+ /// This is not exported to bindings users as we don't support Vec<&NonOpaqueType>
pub fn fallbacks(&self) -> Vec<&Fallback> {
self.signed_invoice.fallbacks()
}
}
}
-/// (C-not exported)
+/// This is not exported to bindings users
impl Display for RawHrp {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
let amount = match self.raw_amount {
///
/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
/// in excess of the current time.
-///
+///
/// `duration_since_epoch` is the current time since epoch in seconds.
///
/// You can specify a custom `min_final_cltv_expiry_delta`, or let LDK default it to
/// [`ChannelManager::create_inbound_payment_for_hash`]: lightning::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
/// [`PhantomRouteHints::channels`]: lightning::ln::channelmanager::PhantomRouteHints::channels
/// [`MIN_FINAL_CLTV_EXPIRY_DETLA`]: lightning::ln::channelmanager::MIN_FINAL_CLTV_EXPIRY_DELTA
-///
+///
/// This can be used in a `no_std` environment, where [`std::time::SystemTime`] is not
/// available and the current time is supplied by the caller.
pub fn create_phantom_invoice<ES: Deref, NS: Deref, L: Deref>(
///
/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
/// in excess of the current time.
-///
+///
/// `duration_since_epoch` is the current time since epoch in seconds.
///
/// Note that the provided `keys_manager`'s `NodeSigner` implementation must support phantom
/// [`ChannelManager::create_inbound_payment`]: lightning::ln::channelmanager::ChannelManager::create_inbound_payment
/// [`ChannelManager::create_inbound_payment_for_hash`]: lightning::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
/// [`PhantomRouteHints::channels`]: lightning::ln::channelmanager::PhantomRouteHints::channels
-///
+///
/// This can be used in a `no_std` environment, where [`std::time::SystemTime`] is not
/// available and the current time is supplied by the caller.
pub fn create_phantom_invoice_with_description_hash<ES: Deref, NS: Deref, L: Deref>(
/// * Always select the channel with the highest inbound capacity per counterparty node
/// * Prefer channels with capacity at least `min_inbound_capacity_msat` and where the channel
/// `is_usable` (i.e. the peer is connected).
-/// * If any public channel exists, the returned `RouteHint`s will be empty, and the sender will
-/// need to find the path by looking at the public channels instead
+/// * If any public channel exists, only public [`RouteHint`]s will be returned.
+///   * If any public, announced channel exists (i.e. a channel with 7+ confs, to ensure the
+/// announcement has had a chance to propagate), no [`RouteHint`]s will be returned, as the
+/// sender is expected to find the path by looking at the public channels instead.
fn filter_channels<L: Deref>(
channels: Vec<ChannelDetails>, min_inbound_capacity_msat: Option<u64>, logger: &L
) -> Vec<RouteHint> where L::Target: Logger {
let mut min_capacity_channel_exists = false;
let mut online_channel_exists = false;
let mut online_min_capacity_channel_exists = false;
+ let mut has_pub_unconf_chan = false;
log_trace!(logger, "Considering {} channels for invoice route hints", channels.len());
for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) {
}
if channel.is_public {
- // If any public channel exists, return no hints and let the sender
- // look at the public channels instead.
- log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}",
- log_bytes!(channel.channel_id));
- return vec![]
+ if channel.confirmations.is_some() && channel.confirmations < Some(7) {
+ // If we have a public channel, but it doesn't have enough confirmations to (yet)
+ // be in the public network graph (and have gotten a chance to propagate), include
+ // route hints but only for public channels to protect private channel privacy.
+ has_pub_unconf_chan = true;
+ } else {
+ // If any public channel exists, return no hints and let the sender
+ // look at the public channels instead.
+ log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}",
+ log_bytes!(channel.channel_id));
+ return vec![]
+ }
}
if channel.inbound_capacity_msat >= min_inbound_capacity {
match filtered_channels.entry(channel.counterparty.node_id) {
hash_map::Entry::Occupied(mut entry) => {
let current_max_capacity = entry.get().inbound_capacity_msat;
- if channel.inbound_capacity_msat < current_max_capacity {
+ // If this channel is public and the previous channel is not, ensure we replace the
+ // previous channel to avoid announcing non-public channels.
+ let new_now_public = channel.is_public && !entry.get().is_public;
+ // If the public-ness of the channel has not changed (in which case simply defer to
+					// `new_now_public`), and this channel has a greater capacity, prefer to announce
+ // this channel.
+ let new_higher_capacity = channel.is_public == entry.get().is_public &&
+ channel.inbound_capacity_msat > current_max_capacity;
+ if new_now_public || new_higher_capacity {
+ log_trace!(logger,
+ "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
+ log_pubkey!(channel.counterparty.node_id),
+ log_bytes!(channel.channel_id), channel.short_channel_id,
+ channel.inbound_capacity_msat,
+ log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+ current_max_capacity);
+ entry.insert(channel);
+ } else {
log_trace!(logger,
- "Preferring counterparty {} channel {} ({} msats) over {} ({} msats) for invoice route hints",
+ "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
log_pubkey!(channel.counterparty.node_id),
- log_bytes!(entry.get().channel_id), current_max_capacity,
- log_bytes!(channel.channel_id), channel.inbound_capacity_msat);
- continue;
+ log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+ current_max_capacity,
+ log_bytes!(channel.channel_id), channel.short_channel_id,
+ channel.inbound_capacity_msat);
}
- log_trace!(logger,
- "Preferring counterparty {} channel {} ({} msats) over {} ({} msats) for invoice route hints",
- log_pubkey!(channel.counterparty.node_id),
- log_bytes!(channel.channel_id), channel.inbound_capacity_msat,
- log_bytes!(entry.get().channel_id), current_max_capacity);
- entry.insert(channel);
}
hash_map::Entry::Vacant(entry) => {
entry.insert(channel);
.map(|(_, channel)| channel)
.filter(|channel| {
let has_enough_capacity = channel.inbound_capacity_msat >= min_inbound_capacity;
- let include_channel = if online_min_capacity_channel_exists {
+ let include_channel = if has_pub_unconf_chan {
+ // If we have a public channel, but it doesn't have enough confirmations to (yet)
+ // be in the public network graph (and have gotten a chance to propagate), include
+ // route hints but only for public channels to protect private channel privacy.
+ channel.is_public
+ } else if online_min_capacity_channel_exists {
has_enough_capacity && channel.is_usable
} else if min_capacity_channel_exists && online_channel_exists {
// If there are some online channels and some min_capacity channels, but no
log_trace!(logger, "Ignoring channel {} without enough capacity for invoice route hints",
log_bytes!(channel.channel_id));
} else {
- debug_assert!(!channel.is_usable);
+ debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_public));
log_trace!(logger, "Ignoring channel {} with disconnected peer",
log_bytes!(channel.channel_id));
}
use bitcoin_hashes::{Hash, sha256};
use bitcoin_hashes::sha256::Hash as Sha256;
use lightning::chain::keysinterface::{EntropySource, PhantomKeysManager};
+ use lightning::events::{MessageSendEvent, MessageSendEventsProvider, Event};
use lightning::ln::{PaymentPreimage, PaymentHash};
use lightning::ln::channelmanager::{PhantomRouteHints, MIN_FINAL_CLTV_EXPIRY_DELTA, PaymentId};
use lightning::ln::functional_test_utils::*;
use lightning::ln::msgs::ChannelMessageHandler;
use lightning::routing::router::{PaymentParameters, RouteParameters, find_route};
- use lightning::util::events::{MessageSendEvent, MessageSendEventsProvider, Event};
use lightning::util::test_utils;
use lightning::util::config::UserConfig;
use crate::utils::create_invoice_from_channelmanager_and_duration_since_epoch;
assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&payment_hash.0[..]).unwrap());
}
+ #[test]
+ fn test_hints_has_only_public_confd_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.minimum_depth = 1;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Create a private channel with lots of capacity and a lower value public channel (without
+ // confirming the funding tx yet).
+ let unannounced_scid = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0);
+ let conf_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 10_000, 0);
+
+ // Before the channel is available, we should include the unannounced_scid.
+ let mut scid_aliases = HashSet::new();
+ scid_aliases.insert(unannounced_scid.0.short_channel_id_alias.unwrap());
+ match_invoice_routes(Some(5000), &nodes[1], scid_aliases.clone());
+
+ // However after we mine the funding tx and exchange channel_ready messages for the public
+ // channel we'll immediately switch to including it as a route hint, even though it isn't
+ // yet announced.
+ let pub_channel_scid = mine_transaction(&nodes[0], &conf_tx);
+ let node_a_pub_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &node_a_pub_channel_ready);
+
+ assert_eq!(mine_transaction(&nodes[1], &conf_tx), pub_channel_scid);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ if let MessageSendEvent::SendChannelReady { msg, .. } = &events[0] {
+ nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), msg);
+ } else { panic!(); }
+ if let MessageSendEvent::SendChannelUpdate { msg, .. } = &events[1] {
+ nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), msg);
+ } else { panic!(); }
+
+ nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()));
+
+ expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
+ expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+ scid_aliases.clear();
+ scid_aliases.insert(node_a_pub_channel_ready.short_channel_id_alias.unwrap());
+ match_invoice_routes(Some(5000), &nodes[1], scid_aliases.clone());
+ // This also applies even if the amount is more than the payment amount, to ensure users
+		// don't screw up their privacy.
+ match_invoice_routes(Some(50_000_000), &nodes[1], scid_aliases.clone());
+
+ // The same remains true until the channel has 7 confirmations, at which point we include
+ // no hints.
+ connect_blocks(&nodes[1], 5);
+ match_invoice_routes(Some(5000), &nodes[1], scid_aliases.clone());
+ connect_blocks(&nodes[1], 1);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, nodes[0].node.get_our_node_id());
+ match_invoice_routes(Some(5000), &nodes[1], HashSet::new());
+ }
+
#[test]
fn test_hints_includes_single_channels_to_nodes() {
let chanmon_cfgs = create_chanmon_cfgs(3);
tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
[dev-dependencies]
-tokio = { version = "~1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
lightning = { version = "0.0.114", path = "../lightning", features = ["_test_utils"] }
// licenses.
//! A socket handling library for those running in Tokio environments who wish to use
-//! rust-lightning with native TcpStreams.
+//! rust-lightning with native [`TcpStream`]s.
//!
//! Designed to be as simple as possible, the high-level usage is almost as simple as "hand over a
-//! TcpStream and a reference to a PeerManager and the rest is handled", except for the
-//! [Event](../lightning/util/events/enum.Event.html) handling mechanism; see example below.
+//! [`TcpStream`] and a reference to a [`PeerManager`] and the rest is handled".
//!
-//! The PeerHandler, due to the fire-and-forget nature of this logic, must be an Arc, and must use
-//! the SocketDescriptor provided here as the PeerHandler's SocketDescriptor.
+//! The [`PeerManager`], due to the fire-and-forget nature of this logic, must be a reference,
+//! (e.g. an [`Arc`]) and must use the [`SocketDescriptor`] provided here as the [`PeerManager`]'s
+//! `SocketDescriptor` implementation.
//!
-//! Three methods are exposed to register a new connection for handling in tokio::spawn calls; see
-//! their individual docs for details.
+//! Three methods are exposed to register a new connection for handling in [`tokio::spawn`] calls;
+//! see their individual docs for details.
//!
-//! # Example
-//! ```
-//! use std::net::TcpStream;
-//! use bitcoin::secp256k1::PublicKey;
-//! use lightning::util::events::{Event, EventHandler, EventsProvider};
-//! use std::net::SocketAddr;
-//! use std::sync::Arc;
-//!
-//! // Define concrete types for our high-level objects:
-//! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
-//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
-//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
-//! type NodeSigner = dyn lightning::chain::keysinterface::NodeSigner + Send + Sync;
-//! type UtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
-//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
-//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
-//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
-//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
-//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, UtxoLookup, Logger>>;
-//!
-//! // Connect to node with pubkey their_node_id at addr:
-//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
-//! lightning_net_tokio::connect_outbound(peer_manager, their_node_id, addr).await;
-//! loop {
-//! let event_handler = |event: Event| {
-//! // Handle the event!
-//! };
-//! channel_manager.await_persistable_update();
-//! channel_manager.process_pending_events(&event_handler);
-//! chain_monitor.process_pending_events(&event_handler);
-//! }
-//! }
-//!
-//! // Begin reading from a newly accepted socket and talk to the peer:
-//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
-//! lightning_net_tokio::setup_inbound(peer_manager, socket);
-//! loop {
-//! let event_handler = |event: Event| {
-//! // Handle the event!
-//! };
-//! channel_manager.await_persistable_update();
-//! channel_manager.process_pending_events(&event_handler);
-//! chain_monitor.process_pending_events(&event_handler);
-//! }
-//! }
-//! ```
+//! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
use lightning::ln::peer_handler::{MessageHandler, PeerManager};
use lightning::ln::features::NodeFeatures;
use lightning::routing::gossip::NodeId;
- use lightning::util::events::*;
+ use lightning::events::*;
use lightning::util::test_utils::TestNodeSigner;
use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
use bitcoin::{Txid, TxMerkleNode};
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::Persist;
+ use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
use lightning::chain::transaction::OutPoint;
use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
+ use lightning::events::{ClosureReason, MessageSendEventsProvider};
use lightning::ln::functional_test_utils::*;
- use lightning::util::events::{ClosureReason, MessageSendEventsProvider};
use lightning::util::test_utils;
use std::fs;
use bitcoin::hashes::Hash;
check_added_monitors!(nodes[1], 1);
// Make sure everything is persisted as expected after close.
- check_persisted_data!(11);
+ check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
}
// Test that if the persister's path to channel data is read-only, writing a
//! # use lightning::util::logger::{Logger, Record};
//! # struct FakeLogger {}
//! # impl Logger for FakeLogger {
-//! # fn log(&self, record: &Record) { unimplemented!() }
+//! # fn log(&self, record: &Record) { }
//! # }
//! # let logger = FakeLogger {};
//!
/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
/// [`RapidGossipSync::new`].
///
- /// (C-not exported) as bindings don't support a reference-to-a-reference yet
+ /// This is not exported to bindings users as bindings don't support a reference-to-a-reference yet
pub fn network_graph(&self) -> &NG {
&self.network_graph
}
};
use lightning::routing::gossip::NetworkGraph;
use lightning::util::logger::Logger;
-use lightning::{log_warn, log_trace, log_given_level};
+use lightning::{log_debug, log_warn, log_trace, log_given_level, log_gossip};
use lightning::util::ser::{BigSize, Readable};
use lightning::io;
mut read_cursor: &mut R,
current_time_unix: Option<u64>
) -> Result<u32, GraphSyncError> {
+ log_trace!(self.logger, "Processing RGS data...");
let mut prefix = [0u8; 4];
read_cursor.read_exact(&mut prefix)?;
let node_id_1 = node_ids[node_id_1_index.0 as usize];
let node_id_2 = node_ids[node_id_2_index.0 as usize];
+ log_gossip!(self.logger, "Adding channel {} from RGS announcement at {}",
+ short_channel_id, latest_seen_timestamp);
+
let announcement_result = network_graph.add_channel_from_partial_announcement(
short_channel_id,
backdated_timestamp as u64,
previous_scid = 0; // updates start at a new scid
let update_count: u32 = Readable::read(read_cursor)?;
+ log_debug!(self.logger, "Processing RGS update from {} with {} nodes, {} channel announcements and {} channel updates.",
+ latest_seen_timestamp, node_id_count, announcement_count, update_count);
if update_count == 0 {
return Ok(latest_seen_timestamp);
}
continue;
}
+ log_gossip!(self.logger, "Updating channel {} with flags {} from RGS announcement at {}",
+ short_channel_id, channel_flags, latest_seen_timestamp);
match network_graph.update_channel_unsigned(&synthetic_update) {
Ok(_) => {},
Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
self.network_graph.set_last_rapid_gossip_sync_timestamp(latest_seen_timestamp);
self.is_initial_sync_complete.store(true, Ordering::Release);
+ log_trace!(self.logger, "Done processing RGS data from {}", latest_seen_timestamp);
Ok(latest_seen_timestamp)
}
}
bitcoin = { version = "0.29.0", default-features = false }
bdk-macros = "0.6"
futures = { version = "0.3", optional = true }
-esplora-client = { version = "0.3.0", default-features = false, optional = true }
+esplora-client = { version = "0.4", default-features = false, optional = true }
reqwest = { version = "0.11", optional = true, default-features = false, features = ["json"] }
[dev-dependencies]
lightning = { version = "0.0.114", path = "../lightning", features = ["std"] }
electrsd = { version = "0.22.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_23_0"] }
electrum-client = "0.12.0"
-once_cell = "1.16.0"
tokio = { version = "1.14.0", features = ["full"] }
use bitcoind::bitcoincore_rpc::RpcApi;
use electrum_client::ElectrumApi;
-use once_cell::sync::OnceCell;
-
use std::env;
use std::sync::Mutex;
use std::time::Duration;
use std::collections::{HashMap, HashSet};
-static BITCOIND: OnceCell<BitcoinD> = OnceCell::new();
-static ELECTRSD: OnceCell<ElectrsD> = OnceCell::new();
-static PREMINE: OnceCell<()> = OnceCell::new();
-static MINER_LOCK: OnceCell<Mutex<()>> = OnceCell::new();
-
-fn get_bitcoind() -> &'static BitcoinD {
- BITCOIND.get_or_init(|| {
- let bitcoind_exe =
- env::var("BITCOIND_EXE").ok().or_else(|| bitcoind::downloaded_exe_path().ok()).expect(
- "you need to provide an env var BITCOIND_EXE or specify a bitcoind version feature",
- );
- let mut conf = bitcoind::Conf::default();
- conf.network = "regtest";
- let bitcoind = BitcoinD::with_conf(bitcoind_exe, &conf).unwrap();
- std::thread::sleep(Duration::from_secs(1));
- bitcoind
- })
-}
-
-fn get_electrsd() -> &'static ElectrsD {
- ELECTRSD.get_or_init(|| {
- let bitcoind = get_bitcoind();
- let electrs_exe =
- env::var("ELECTRS_EXE").ok().or_else(electrsd::downloaded_exe_path).expect(
- "you need to provide env var ELECTRS_EXE or specify an electrsd version feature",
- );
- let mut conf = electrsd::Conf::default();
- conf.http_enabled = true;
- conf.network = "regtest";
- let electrsd = ElectrsD::with_conf(electrs_exe, &bitcoind, &conf).unwrap();
- std::thread::sleep(Duration::from_secs(1));
- electrsd
- })
+pub fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) {
+ let bitcoind_exe =
+ env::var("BITCOIND_EXE").ok().or_else(|| bitcoind::downloaded_exe_path().ok()).expect(
+ "you need to provide an env var BITCOIND_EXE or specify a bitcoind version feature",
+ );
+ let mut bitcoind_conf = bitcoind::Conf::default();
+ bitcoind_conf.network = "regtest";
+ let bitcoind = BitcoinD::with_conf(bitcoind_exe, &bitcoind_conf).unwrap();
+
+ let electrs_exe = env::var("ELECTRS_EXE")
+ .ok()
+ .or_else(electrsd::downloaded_exe_path)
+ .expect("you need to provide env var ELECTRS_EXE or specify an electrsd version feature");
+ let mut electrsd_conf = electrsd::Conf::default();
+ electrsd_conf.http_enabled = true;
+ electrsd_conf.network = "regtest";
+ let electrsd = ElectrsD::with_conf(electrs_exe, &bitcoind, &electrsd_conf).unwrap();
+ (bitcoind, electrsd)
}
-fn generate_blocks_and_wait(num: usize) {
- let miner_lock = MINER_LOCK.get_or_init(|| Mutex::new(()));
- let _miner = miner_lock.lock().unwrap();
- let cur_height = get_bitcoind().client.get_block_count().expect("failed to get current block height");
- let address = get_bitcoind().client.get_new_address(Some("test"), Some(AddressType::Legacy)).expect("failed to get new address");
+pub fn generate_blocks_and_wait(bitcoind: &BitcoinD, electrsd: &ElectrsD, num: usize) {
+ let cur_height = bitcoind.client.get_block_count().expect("failed to get current block height");
+ let address = bitcoind
+ .client
+ .get_new_address(Some("test"), Some(AddressType::Legacy))
+ .expect("failed to get new address");
// TODO: expect this Result once the WouldBlock issue is resolved upstream.
- let _block_hashes_res = get_bitcoind().client.generate_to_address(num as u64, &address);
- wait_for_block(cur_height as usize + num);
+ let _block_hashes_res = bitcoind.client.generate_to_address(num as u64, &address);
+ wait_for_block(electrsd, cur_height as usize + num);
}
-fn wait_for_block(min_height: usize) {
- let mut header = match get_electrsd().client.block_headers_subscribe() {
+pub fn wait_for_block(electrsd: &ElectrsD, min_height: usize) {
+ let mut header = match electrsd.client.block_headers_subscribe() {
Ok(header) => header,
Err(_) => {
// While subscribing should succeed the first time around, we ran into some cases where
// it didn't. Since we can't proceed without subscribing, we try again after a delay
// and panic if it still fails.
std::thread::sleep(Duration::from_secs(1));
- get_electrsd().client.block_headers_subscribe().expect("failed to subscribe to block headers")
+ electrsd.client.block_headers_subscribe().expect("failed to subscribe to block headers")
}
};
-
loop {
if header.height >= min_height {
break;
}
header = exponential_backoff_poll(|| {
- get_electrsd().trigger().expect("failed to trigger electrsd");
- get_electrsd().client.ping().expect("failed to ping electrsd");
- get_electrsd().client.block_headers_pop().expect("failed to pop block header")
+ electrsd.trigger().expect("failed to trigger electrsd");
+ electrsd.client.ping().expect("failed to ping electrsd");
+ electrsd.client.block_headers_pop().expect("failed to pop block header")
});
}
}
}
}
-fn premine() {
- PREMINE.get_or_init(|| {
- generate_blocks_and_wait(101);
- });
-}
-
#[derive(Debug)]
enum TestConfirmableEvent {
Confirmed(Txid, BlockHash, u32),
#[test]
#[cfg(feature = "esplora-blocking")]
fn test_esplora_syncs() {
- premine();
+ let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+ generate_blocks_and_wait(&bitcoind, &electrsd, 101);
let mut logger = TestLogger {};
- let esplora_url = format!("http://{}", get_electrsd().esplora_url.as_ref().unwrap());
+ let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
let tx_sync = EsploraSyncClient::new(esplora_url, &mut logger);
let confirmable = TestConfirmable::new();
// Check we pick up on new best blocks
- let expected_height = 0u32;
- assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+ assert_eq!(confirmable.best_block.lock().unwrap().1, 0);
tx_sync.sync(vec![&confirmable]).unwrap();
-
- let expected_height = get_bitcoind().client.get_block_count().unwrap() as u32;
- assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+ assert_eq!(confirmable.best_block.lock().unwrap().1, 102);
let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
assert_eq!(events.len(), 1);
// Check registered confirmed transactions are marked confirmed
- let new_address = get_bitcoind().client.get_new_address(Some("test"), Some(AddressType::Legacy)).unwrap();
- let txid = get_bitcoind().client.send_to_address(&new_address, Amount::from_sat(5000), None, None, None, None, None, None).unwrap();
+ let new_address = bitcoind.client.get_new_address(Some("test"), Some(AddressType::Legacy)).unwrap();
+ let txid = bitcoind.client.send_to_address(&new_address, Amount::from_sat(5000), None, None, None, None, None, None).unwrap();
tx_sync.register_tx(&txid, &new_address.script_pubkey());
tx_sync.sync(vec![&confirmable]).unwrap();
assert!(confirmable.confirmed_txs.lock().unwrap().is_empty());
assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
- generate_blocks_and_wait(1);
+ generate_blocks_and_wait(&bitcoind, &electrsd, 1);
tx_sync.sync(vec![&confirmable]).unwrap();
let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
// Check previously confirmed transactions are marked unconfirmed when they are reorged.
- let best_block_hash = get_bitcoind().client.get_best_block_hash().unwrap();
- get_bitcoind().client.invalidate_block(&best_block_hash).unwrap();
+ let best_block_hash = bitcoind.client.get_best_block_hash().unwrap();
+ bitcoind.client.invalidate_block(&best_block_hash).unwrap();
// We're getting back to the previous height with a new tip, but best block shouldn't change.
- generate_blocks_and_wait(1);
- assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ generate_blocks_and_wait(&bitcoind, &electrsd, 1);
+ assert_ne!(bitcoind.client.get_best_block_hash().unwrap(), best_block_hash);
tx_sync.sync(vec![&confirmable]).unwrap();
let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
assert_eq!(events.len(), 0);
// Now we're surpassing previous height, getting new tip.
- generate_blocks_and_wait(1);
- assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ generate_blocks_and_wait(&bitcoind, &electrsd, 1);
+ assert_ne!(bitcoind.client.get_best_block_hash().unwrap(), best_block_hash);
tx_sync.sync(vec![&confirmable]).unwrap();
// Transaction still confirmed but under new tip.
#[tokio::test]
#[cfg(feature = "esplora-async")]
async fn test_esplora_syncs() {
- premine();
+ let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+ generate_blocks_and_wait(&bitcoind, &electrsd, 101);
let mut logger = TestLogger {};
- let esplora_url = format!("http://{}", get_electrsd().esplora_url.as_ref().unwrap());
+ let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
let tx_sync = EsploraSyncClient::new(esplora_url, &mut logger);
let confirmable = TestConfirmable::new();
// Check we pick up on new best blocks
- let expected_height = 0u32;
- assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+ assert_eq!(confirmable.best_block.lock().unwrap().1, 0);
tx_sync.sync(vec![&confirmable]).await.unwrap();
-
- let expected_height = get_bitcoind().client.get_block_count().unwrap() as u32;
- assert_eq!(confirmable.best_block.lock().unwrap().1, expected_height);
+ assert_eq!(confirmable.best_block.lock().unwrap().1, 102);
let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
assert_eq!(events.len(), 1);
// Check registered confirmed transactions are marked confirmed
- let new_address = get_bitcoind().client.get_new_address(Some("test"), Some(AddressType::Legacy)).unwrap();
- let txid = get_bitcoind().client.send_to_address(&new_address, Amount::from_sat(5000), None, None, None, None, None, None).unwrap();
+ let new_address = bitcoind.client.get_new_address(Some("test"), Some(AddressType::Legacy)).unwrap();
+ let txid = bitcoind.client.send_to_address(&new_address, Amount::from_sat(5000), None, None, None, None, None, None).unwrap();
tx_sync.register_tx(&txid, &new_address.script_pubkey());
tx_sync.sync(vec![&confirmable]).await.unwrap();
assert!(confirmable.confirmed_txs.lock().unwrap().is_empty());
assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
- generate_blocks_and_wait(1);
+ generate_blocks_and_wait(&bitcoind, &electrsd, 1);
tx_sync.sync(vec![&confirmable]).await.unwrap();
let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
assert!(confirmable.unconfirmed_txs.lock().unwrap().is_empty());
// Check previously confirmed transactions are marked unconfirmed when they are reorged.
- let best_block_hash = get_bitcoind().client.get_best_block_hash().unwrap();
- get_bitcoind().client.invalidate_block(&best_block_hash).unwrap();
+ let best_block_hash = bitcoind.client.get_best_block_hash().unwrap();
+ bitcoind.client.invalidate_block(&best_block_hash).unwrap();
// We're getting back to the previous height with a new tip, but best block shouldn't change.
- generate_blocks_and_wait(1);
- assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ generate_blocks_and_wait(&bitcoind, &electrsd, 1);
+ assert_ne!(bitcoind.client.get_best_block_hash().unwrap(), best_block_hash);
tx_sync.sync(vec![&confirmable]).await.unwrap();
let events = std::mem::take(&mut *confirmable.events.lock().unwrap());
assert_eq!(events.len(), 0);
// Now we're surpassing previous height, getting new tip.
- generate_blocks_and_wait(1);
- assert_ne!(get_bitcoind().client.get_best_block_hash().unwrap(), best_block_hash);
+ generate_blocks_and_wait(&bitcoind, &electrsd, 1);
+ assert_ne!(bitcoind.client.get_best_block_hash().unwrap(), best_block_hash);
tx_sync.sync(vec![&confirmable]).await.unwrap();
// Transaction still confirmed but under new tip.
version = "0.29.0"
default-features = false
features = ["bitcoinconsensus", "secp-recovery"]
+
+[target.'cfg(taproot)'.dependencies]
+musig2 = { git = "https://github.com/arik-so/rust-musig2", rev = "27797d7" }
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::keysinterface::WriteableEcdsaChannelSigner;
+use crate::events;
+use crate::events::{Event, EventHandler};
use crate::util::atomic_counter::AtomicCounter;
use crate::util::logger::Logger;
use crate::util::errors::APIError;
-use crate::util::events;
-use crate::util::events::{Event, EventHandler};
+use crate::util::wakers::{Future, Notifier};
use crate::ln::channelmanager::ChannelDetails;
use crate::prelude::*;
pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,
+
+ event_notifier: Notifier,
}
impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
ChannelMonitorUpdateStatus::PermanentFailure => {
monitor_state.channel_perm_failed.store(true, Ordering::Release);
self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
+ self.event_notifier.notify();
},
ChannelMonitorUpdateStatus::InProgress => {
log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
persister,
pending_monitor_events: Mutex::new(Vec::new()),
highest_chain_height: AtomicUsize::new(0),
+ event_notifier: Notifier::new(),
}
}
}
},
}
+ self.event_notifier.notify();
Ok(())
}
funding_txo,
monitor_update_id,
}], counterparty_node_id));
+ self.event_notifier.notify();
}
#[cfg(any(test, fuzzing, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
- use crate::util::events::EventsProvider;
+ use crate::events::EventsProvider;
let events = core::cell::RefCell::new(Vec::new());
let event_handler = |event: events::Event| events.borrow_mut().push(event);
self.process_pending_events(&event_handler);
///
/// See the trait-level documentation of [`EventsProvider`] for requirements.
///
- /// [`EventsProvider`]: crate::util::events::EventsProvider
+ /// [`EventsProvider`]: crate::events::EventsProvider
pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
&self, handler: H
) {
handler(event).await;
}
}
+
+ /// Gets a [`Future`] that completes when an event is available either via
+ /// [`chain::Watch::release_pending_monitor_events`] or
+ /// [`EventsProvider::process_pending_events`].
+ ///
+ /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
+ /// [`ChainMonitor`] and should instead register actions to be taken later.
+ ///
+ /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
+ pub fn get_update_future(&self) -> Future {
+ self.event_notifier.get_future()
+ }
}
impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
+ use crate::events::{Event, ClosureReason, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{PaymentSendFailure, PaymentId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
use crate::util::errors::APIError;
- use crate::util::events::{Event, ClosureReason, MessageSendEvent, MessageSendEventsProvider};
#[test]
fn test_async_ooo_offchain_updates() {
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
-use crate::util::events::Event;
+use crate::events::Event;
#[cfg(anchors)]
-use crate::util::events::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent};
+use crate::events::bump_transaction::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent};
use crate::prelude::*;
use core::{cmp, mem};
/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
/// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
/// The sequence number of this update. Updates *must* be replayed in-order according to this
/// sequence number (and updates may panic if they are not). The update_id values are strictly
- /// increasing and increase by one for each new update, with one exception specified below.
+ /// increasing and increase by one for each new update, with two exceptions specified below.
///
/// This sequence number is also used to track up to which points updates which returned
/// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
///
- /// The only instance where update_id values are not strictly increasing is the case where we
- /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
- /// its docs for more details.
+ /// The only instances we allow where update_id values are not strictly increasing have a
+ /// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. This update ID is used for updates that
+ /// will force close the channel by broadcasting the latest commitment transaction or
+ /// special post-force-close updates, like providing preimages necessary to claim outputs on the
+ /// broadcast commitment transaction. See its docs for more details.
///
/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
}
-/// If:
-/// (1) a channel has been force closed and
-/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
-/// this channel's (the backward link's) broadcasted commitment transaction
-/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
-/// with the update providing said payment preimage. No other update types are allowed after
-/// force-close.
+/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
+///
+/// (1) attempting to force close the channel by broadcasting our latest commitment transaction or
+/// (2) providing a preimage (after the channel has been force closed) from a forward link that
+/// allows us to spend an HTLC output on this channel's (the backward link's) broadcasted
+/// commitment transaction.
+///
+/// No other [`ChannelMonitorUpdate`]s are allowed after force-close.
pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
impl Writeable for ChannelMonitorUpdate {
);
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,
payment_hash, payment_preimage, broadcaster, fee_estimator, logger)
}
- pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(
- &self,
- broadcaster: &B,
- logger: &L,
- ) where
- B::Target: BroadcasterInterface,
- L::Target: Logger,
- {
- self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger);
- }
-
/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
/// itself.
///
/// This is called by the [`EventsProvider::process_pending_events`] implementation for
/// [`ChainMonitor`].
///
- /// [`EventsProvider::process_pending_events`]: crate::util::events::EventsProvider::process_pending_events
+ /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
self.inner.lock().unwrap().get_and_clear_pending_events()
{
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
- // ChannelMonitor updates may be applied after force close if we receive a
- // preimage for a broadcasted commitment transaction HTLC output that we'd
- // like to claim on-chain. If this is the case, we no longer have guaranteed
- // access to the monitor's update ID, so we use a sentinel value instead.
+ // ChannelMonitor updates may be applied after force close if we receive a preimage for a
+ // broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+ // is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+ // sentinel value instead.
+ //
+ // The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+ // thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+ // them as well.
if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
- ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+ ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
+ // We should have already seen a `ChannelForceClosed` update if we're trying to
+ // provide a preimage at this point.
+ ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
+ debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID),
_ => {
log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
},
}
}
+
+ // If the updates succeeded and we were in an already closed channel state, then there's no
+ // need to refuse any updates we expect to receive afer seeing a confirmed commitment.
+ if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id {
+ return Ok(());
+ }
+
self.latest_update_id = updates.update_id;
if ret.is_ok() && self.funding_spend_seen {
}));
},
ClaimEvent::BumpHTLC {
- target_feerate_sat_per_1000_weight, htlcs,
+ target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
} => {
let mut htlc_descriptors = Vec::with_capacity(htlcs.len());
for htlc in htlcs {
ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
target_feerate_sat_per_1000_weight,
htlc_descriptors,
+ tx_lock_time,
}));
}
}
}
}
-impl<Signer: WriteableEcdsaChannelSigner, T: Deref, F: Deref, L: Deref> chain::Confirm for (ChannelMonitor<Signer>, T, F, L)
+impl<Signer: WriteableEcdsaChannelSigner, M, T: Deref, F: Deref, L: Deref> chain::Confirm for (M, T, F, L)
where
+ M: Deref<Target = ChannelMonitor<Signer>>,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::InMemorySigner;
+ use crate::events::ClosureReason;
use crate::ln::{PaymentPreimage, PaymentHash};
use crate::ln::chan_utils;
use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::ln::functional_test_utils::*;
use crate::ln::script::ShutdownScript;
use crate::util::errors::APIError;
- use crate::util::events::{ClosureReason, MessageSendEventsProvider};
use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
use crate::util::ser::{ReadableArgs, Writeable};
use crate::sync::{Arc, Mutex};
use crate::util::transaction_utils;
use crate::util::crypto::{hkdf_extract_expand_twice, sign};
use crate::util::ser::{Writeable, Writer, Readable};
-#[cfg(anchors)]
-use crate::util::events::HTLCDescriptor;
use crate::chain::transaction::OutPoint;
+#[cfg(anchors)]
+use crate::events::bump_transaction::HTLCDescriptor;
use crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI;
use crate::ln::{chan_utils, PaymentPreimage};
use crate::ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, HolderCommitmentTransaction, ChannelTransactionParameters, CommitmentTransaction, ClosingTransaction};
/// Used as initial key material, to be expanded into multiple secret keys (but not to be used
/// directly). This is used within LDK to encrypt/decrypt inbound payment data.
///
-/// (C-not exported) as we just use `[u8; 32]` directly
+/// This is not exported to bindings users as we just use `[u8; 32]` directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct KeyMaterial(pub [u8; 32]);
/// outpoint describing which `txid` and output `index` is available, the full output which exists
/// at that `txid`/`index`, and any keys or other information required to sign.
///
-/// [`SpendableOutputs`]: crate::util::events::Event::SpendableOutputs
+/// [`SpendableOutputs`]: crate::events::Event::SpendableOutputs
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum SpendableOutputDescriptor {
/// An output to a script which was provided via [`SignerProvider`] directly, either from
//! OnchainTxHandler objects are fully-part of ChannelMonitor and encapsulates all
//! building, tracking, bumping and notifications functions.
+#[cfg(anchors)]
+use bitcoin::PackedLockTime;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;
}
}
-/// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
-/// once they mature to enough confirmations (ANTI_REORG_DELAY)
+/// Events for claims the [`OnchainTxHandler`] has generated. Once the events are considered safe
+/// from a chain reorg, the [`OnchainTxHandler`] will act accordingly.
#[derive(PartialEq, Eq)]
enum OnchainEvent {
- /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from
- /// bump-txn candidate buffer.
+ /// A pending request has been claimed by a transaction spending the exact same set of outpoints
+ /// as the request. This claim can either be ours or from the counterparty. Once the claiming
+ /// transaction has met [`ANTI_REORG_DELAY`] confirmations, we consider it final and remove the
+ /// pending request.
Claim {
package_id: PackageID,
},
- /// Claim tx aggregate multiple claimable outpoints. One of the outpoint may be claimed by a counterparty party tx.
- /// In this case, we need to drop the outpoint and regenerate a new claim tx. By safety, we keep tracking
- /// the outpoint to be sure to resurect it back to the claim tx if reorgs happen.
+ /// The counterparty has claimed an outpoint from one of our pending requests through a
+ /// different transaction than ours. If our transaction was attempting to claim multiple
+ /// outputs, we need to drop the outpoint claimed by the counterparty and regenerate a new claim
+ /// transaction for ourselves. We keep tracking, separately, the outpoint claimed by the
+ /// counterparty up to [`ANTI_REORG_DELAY`] confirmations to ensure we attempt to re-claim it
+ /// if the counterparty's claim is reorged from the chain.
ContentiousOutpoint {
package: PackageTemplate,
}
BumpHTLC {
target_feerate_sat_per_1000_weight: u32,
htlcs: Vec<ExternalHTLCClaim>,
+ tx_lock_time: PackedLockTime,
},
}
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and
/// do RBF bumping if possible.
-#[derive(PartialEq)]
pub struct OnchainTxHandler<ChannelSigner: WriteableEcdsaChannelSigner> {
destination_script: Script,
holder_commitment: HolderCommitmentTransaction,
pub(crate) pending_claim_requests: HashMap<PackageID, PackageTemplate>,
#[cfg(not(test))]
pending_claim_requests: HashMap<PackageID, PackageTemplate>,
+
+ // Used to track external events that need to be forwarded to the `ChainMonitor`. This `Vec`
+ // essentially acts as an insertion-ordered `HashMap` – there should only ever be one occurrence
+ // of a `PackageID`, which tracks its latest `ClaimEvent`, i.e., if a pending claim exists, and
+ // a new block has been connected, resulting in a new claim, the previous will be replaced with
+ // the new.
+ //
+ // These external events may be generated in the following cases:
+ // - A channel has been force closed by broadcasting the holder's latest commitment transaction
+ // - A block being connected/disconnected
+ // - Learning the preimage for an HTLC we can claim onchain
#[cfg(anchors)]
- pending_claim_events: HashMap<PackageID, ClaimEvent>,
-
- // Used to link outpoints claimed in a connected block to a pending claim request.
- // Key is outpoint than monitor parsing has detected we have keys/scripts to claim
- // Value is (pending claim request identifier, confirmation_block), identifier
- // is txid of the initial claiming transaction and is immutable until outpoint is
- // post-anti-reorg-delay solved, confirmaiton_block is used to erase entry if
- // block with output gets disconnected.
+ pending_claim_events: Vec<(PackageID, ClaimEvent)>,
+
+ // Used to link outpoints claimed in a connected block to a pending claim request. The keys
+ // represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to
+ // claim. The values track the pending claim request identifier and the initial confirmation
+ // block height, and are immutable until the outpoint has enough confirmations to meet our
+ // [`ANTI_REORG_DELAY`]. The initial confirmation block height is used to remove the entry if
+ // the block gets disconnected.
#[cfg(test)] // Used in functional_test to verify sanitization
pub claimable_outpoints: HashMap<BitcoinOutPoint, (PackageID, u32)>,
#[cfg(not(test))]
pub(super) secp_ctx: Secp256k1<secp256k1::All>,
}
+impl<ChannelSigner: WriteableEcdsaChannelSigner> PartialEq for OnchainTxHandler<ChannelSigner> {
+ fn eq(&self, other: &Self) -> bool {
+ // `signer`, `secp_ctx`, and `pending_claim_events` are excluded on purpose.
+ self.destination_script == other.destination_script &&
+ self.holder_commitment == other.holder_commitment &&
+ self.holder_htlc_sigs == other.holder_htlc_sigs &&
+ self.prev_holder_commitment == other.prev_holder_commitment &&
+ self.prev_holder_htlc_sigs == other.prev_holder_htlc_sigs &&
+ self.channel_transaction_parameters == other.channel_transaction_parameters &&
+ self.pending_claim_requests == other.pending_claim_requests &&
+ self.claimable_outpoints == other.claimable_outpoints &&
+ self.locktimed_packages == other.locktimed_packages &&
+ self.onchain_events_awaiting_threshold_conf == other.onchain_events_awaiting_threshold_conf
+ }
+}
+
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
pending_claim_requests,
onchain_events_awaiting_threshold_conf,
#[cfg(anchors)]
- pending_claim_events: HashMap::new(),
+ pending_claim_events: Vec::new(),
secp_ctx,
})
}
locktimed_packages: BTreeMap::new(),
onchain_events_awaiting_threshold_conf: Vec::new(),
#[cfg(anchors)]
- pending_claim_events: HashMap::new(),
-
+ pending_claim_events: Vec::new(),
secp_ctx,
}
}
#[cfg(anchors)]
pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<ClaimEvent> {
- let mut ret = HashMap::new();
- swap(&mut ret, &mut self.pending_claim_events);
- ret.into_iter().map(|(_, event)| event).collect::<Vec<_>>()
+ let mut events = Vec::new();
+ swap(&mut events, &mut self.pending_claim_events);
+ events.into_iter().map(|(_, event)| event).collect()
}
/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
// transaction is reorged out.
let mut all_inputs_have_confirmed_spend = true;
for outpoint in request_outpoints.iter() {
- if let Some(first_claim_txid_height) = self.claimable_outpoints.get(*outpoint) {
+ if let Some((request_package_id, _)) = self.claimable_outpoints.get(*outpoint) {
// We check for outpoint spends within claims individually rather than as a set
// since requests can have outpoints split off.
if !self.onchain_events_awaiting_threshold_conf.iter()
.any(|event_entry| if let OnchainEvent::Claim { package_id } = event_entry.event {
- first_claim_txid_height.0 == package_id
+ *request_package_id == package_id
} else {
// The onchain event is not a claim, keep seeking until we find one.
false
OnchainClaim::Event(ClaimEvent::BumpHTLC {
target_feerate_sat_per_1000_weight,
htlcs,
+ tx_lock_time: PackedLockTime(cached_request.package_locktime(cur_height)),
}),
));
} else {
) {
assert!(new_feerate != 0);
- let transaction = cached_request.finalize_malleable_package(self, output_value, self.destination_script.clone(), logger).unwrap();
+ let transaction = cached_request.finalize_malleable_package(
+ cur_height, self, output_value, self.destination_script.clone(), logger
+ ).unwrap();
log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
assert!(predicted_weight >= transaction.weight());
return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction)));
.find(|locked_package| locked_package.outpoints() == req.outpoints());
if let Some(package) = timelocked_equivalent_package {
log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
- req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
+ req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height));
continue;
}
- if req.package_timelock() > cur_height + 1 {
- log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), cur_height);
+ let package_locktime = req.package_locktime(cur_height);
+ if package_locktime > cur_height + 1 {
+ log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
for outpoint in req.outpoints() {
log_info!(logger, " Outpoint {}", outpoint);
}
- self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
+ self.locktimed_packages.entry(package_locktime).or_insert(Vec::new()).push(req);
continue;
}
package_id
},
};
- self.pending_claim_events.insert(package_id, claim_event);
+ debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == package_id).count(), 0);
+ self.pending_claim_events.push((package_id, claim_event));
package_id
},
};
// Scan all input to verify is one of the outpoint spent is of interest for us
let mut claimed_outputs_material = Vec::new();
for inp in &tx.input {
- if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
+ if let Some((package_id, _)) = self.claimable_outpoints.get(&inp.previous_output) {
// If outpoint has claim request pending on it...
- if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
+ if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
//... we need to verify equality between transaction outpoints and claim request
// outpoints to know if transaction is the original claim or a bumped one issued
// by us.
txid: tx.txid(),
height: conf_height,
block_hash: Some(conf_hash),
- event: OnchainEvent::Claim { package_id: first_claim_txid_height.0 }
+ event: OnchainEvent::Claim { package_id: *package_id }
};
if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
self.onchain_events_awaiting_threshold_conf.push(entry);
}
//TODO: recompute soonest_timelock to avoid wasting a bit on fees
if at_least_one_drop {
- bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone());
+ bump_candidates.insert(*package_id, request.clone());
+ // If we have any pending claim events for the request being updated
+ // that have yet to be consumed, we'll remove them since they will
+ // end up producing an invalid transaction by double spending
+ // input(s) that already have a confirmed spend. If such spend is
+ // reorged out of the chain, then we'll attempt to re-spend the
+ // inputs once we see it.
+ #[cfg(anchors)] {
+ #[cfg(debug_assertions)] {
+ let existing = self.pending_claim_events.iter()
+ .filter(|entry| entry.0 == *package_id).count();
+ assert!(existing == 0 || existing == 1);
+ }
+ self.pending_claim_events.retain(|entry| entry.0 != *package_id);
+ }
}
}
break; //No need to iterate further, either tx is our or their
log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.",
outpoint, log_bytes!(package_id));
self.claimable_outpoints.remove(outpoint);
- #[cfg(anchors)]
- self.pending_claim_events.remove(&package_id);
+ }
+ #[cfg(anchors)] {
+ #[cfg(debug_assertions)] {
+ let num_existing = self.pending_claim_events.iter()
+ .filter(|entry| entry.0 == package_id).count();
+ assert!(num_existing == 0 || num_existing == 1);
+ }
+ self.pending_claim_events.retain(|(id, _)| *id != package_id);
}
}
},
}
// Check if any pending claim request must be rescheduled
- for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
+ for (package_id, request) in self.pending_claim_requests.iter() {
if let Some(h) = request.timer() {
if cur_height >= h {
- bump_candidates.insert(*first_claim_txid, (*request).clone());
+ bump_candidates.insert(*package_id, request.clone());
}
}
}
// Build, bump and rebroadcast tx accordingly
log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
- for (first_claim_txid, request) in bump_candidates.iter() {
+ for (package_id, request) in bump_candidates.iter() {
if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) {
match bump_claim {
OnchainClaim::Tx(bump_tx) => {
#[cfg(anchors)]
OnchainClaim::Event(claim_event) => {
log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
- self.pending_claim_events.insert(*first_claim_txid, claim_event);
+ #[cfg(debug_assertions)] {
+ let num_existing = self.pending_claim_events.iter().
+ filter(|entry| entry.0 == *package_id).count();
+ assert!(num_existing == 0 || num_existing == 1);
+ }
+ self.pending_claim_events.retain(|event| event.0 != *package_id);
+ self.pending_claim_events.push((*package_id, claim_event));
},
}
- if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
+ if let Some(request) = self.pending_claim_requests.get_mut(package_id) {
request.set_timer(new_timer);
request.set_feerate(new_feerate);
}
//- resurect outpoint back in its claimable set and regenerate tx
match entry.event {
OnchainEvent::ContentiousOutpoint { package } => {
- if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(package.outpoints()[0]) {
- if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
+ if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
+ if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
request.merge_package(package);
// Using a HashMap guarantee us than if we have multiple outpoints getting
// resurrected only one bump claim tx is going to be broadcast
- bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone());
+ bump_candidates.insert(pending_claim.clone(), request.clone());
}
}
},
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
- for (_first_claim_txid_height, request) in bump_candidates.iter_mut() {
+ for ((_package_id, _), ref mut request) in bump_candidates.iter_mut() {
if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) {
request.set_timer(new_timer);
request.set_feerate(new_feerate);
#[cfg(anchors)]
OnchainClaim::Event(claim_event) => {
log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
- self.pending_claim_events.insert(_first_claim_txid_height.0, claim_event);
+ #[cfg(debug_assertions)] {
+ let num_existing = self.pending_claim_events.iter()
+ .filter(|entry| entry.0 == *_package_id).count();
+ assert!(num_existing == 0 || num_existing == 1);
+ }
+ self.pending_claim_events.retain(|event| event.0 != *_package_id);
+ self.pending_claim_events.push((*_package_id, claim_event));
},
}
}
let chan_keys = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint);
let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
- bumped_tx.lock_time = PackedLockTime(outp.htlc.cltv_expiry); // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
let mut ser_sig = sig.serialize_der().to_vec();
ser_sig.push(EcdsaSighashType::All as u8);
_ => { panic!("API Error!"); }
}
}
- fn absolute_tx_timelock(&self, output_conf_height: u32) -> u32 {
- // Get the absolute timelock at which this output can be spent given the height at which
- // this output was confirmed. We use `output_conf_height + 1` as a safe default as we can
- // be confirmed in the next block and transactions with time lock `current_height + 1`
- // always propagate.
+ fn absolute_tx_timelock(&self, current_height: u32) -> u32 {
+ // We use `current_height + 1` as our default locktime to discourage fee sniping and because
+ // transactions with it always propagate.
let absolute_timelock = match self {
- PackageSolvingData::RevokedOutput(_) => output_conf_height + 1,
- PackageSolvingData::RevokedHTLCOutput(_) => output_conf_height + 1,
- PackageSolvingData::CounterpartyOfferedHTLCOutput(_) => output_conf_height + 1,
- PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => cmp::max(outp.htlc.cltv_expiry, output_conf_height + 1),
- PackageSolvingData::HolderHTLCOutput(ref outp) => cmp::max(outp.cltv_expiry, output_conf_height + 1),
- PackageSolvingData::HolderFundingOutput(_) => output_conf_height + 1,
+ PackageSolvingData::RevokedOutput(_) => current_height + 1,
+ PackageSolvingData::RevokedHTLCOutput(_) => current_height + 1,
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(_) => current_height + 1,
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => cmp::max(outp.htlc.cltv_expiry, current_height + 1),
+ // HTLC timeout/success transactions rely on a fixed timelock due to the counterparty's
+ // signature.
+ PackageSolvingData::HolderHTLCOutput(ref outp) => {
+ if outp.preimage.is_some() {
+ debug_assert_eq!(outp.cltv_expiry, 0);
+ }
+ outp.cltv_expiry
+ },
+ PackageSolvingData::HolderFundingOutput(_) => current_height + 1,
};
absolute_timelock
}
}
amounts
}
- pub(crate) fn package_timelock(&self) -> u32 {
- self.inputs.iter().map(|(_, outp)| outp.absolute_tx_timelock(self.height_original))
- .max().expect("There must always be at least one output to spend in a PackageTemplate")
+ pub(crate) fn package_locktime(&self, current_height: u32) -> u32 {
+ let locktime = self.inputs.iter().map(|(_, outp)| outp.absolute_tx_timelock(current_height))
+ .max().expect("There must always be at least one output to spend in a PackageTemplate");
+
+ // If we ever try to aggregate `HolderHTLCOutput`s with other output types, we'll likely
+ // end up with an incorrect transaction locktime since the counterparty has committed to
+ // the locktime within its HTLC signature. This should never happen unless we decide to
+ // aggregate outputs across different channel commitments.
+ #[cfg(debug_assertions)] {
+ if self.inputs.iter().any(|(_, outp)|
+ if let PackageSolvingData::HolderHTLCOutput(outp) = outp {
+ outp.preimage.is_some()
+ } else {
+ false
+ }
+ ) {
+ debug_assert_eq!(locktime, 0);
+ };
+ for timeout_htlc_expiry in self.inputs.iter().filter_map(|(_, outp)|
+ if let PackageSolvingData::HolderHTLCOutput(outp) = outp {
+ if outp.preimage.is_none() {
+ Some(outp.cltv_expiry)
+ } else { None }
+ } else { None }
+ ) {
+ debug_assert_eq!(locktime, timeout_htlc_expiry);
+ }
+ }
+
+ locktime
}
pub(crate) fn package_weight(&self, destination_script: &Script) -> usize {
let mut inputs_weight = 0;
htlcs
}
pub(crate) fn finalize_malleable_package<L: Deref, Signer: WriteableEcdsaChannelSigner>(
- &self, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64, destination_script: Script, logger: &L
+ &self, current_height: u32, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64,
+ destination_script: Script, logger: &L
) -> Option<Transaction> where L::Target: Logger {
debug_assert!(self.is_malleable());
let mut bumped_tx = Transaction {
version: 2,
- lock_time: PackedLockTime::ZERO,
+ lock_time: PackedLockTime(self.package_locktime(current_height)),
input: vec![],
output: vec![TxOut {
script_pubkey: destination_script,
}
/// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
- /// (C-not exported) as the same type is used universally in the C bindings for all outpoints
+ ///
+ /// This is not exported to bindings users as the same type is used universally in the C bindings
+ /// for all outpoints
pub fn into_bitcoin_outpoint(self) -> BitcoinOutPoint {
BitcoinOutPoint {
txid: self.txid,
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Utilities for bumping transactions originating from [`super::Event`]s.
+
+use crate::ln::PaymentPreimage;
+use crate::ln::chan_utils;
+use crate::ln::chan_utils::{ChannelTransactionParameters, HTLCOutputInCommitment};
+
+use bitcoin::{OutPoint, PackedLockTime, Script, Transaction, Txid, TxIn, TxOut, Witness};
+use bitcoin::secp256k1;
+use bitcoin::secp256k1::{PublicKey, Secp256k1};
+use bitcoin::secp256k1::ecdsa::Signature;
+
+/// A descriptor used to sign for a commitment transaction's anchor output.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct AnchorDescriptor {
+ /// A unique identifier used along with `channel_value_satoshis` to re-derive the
+ /// [`InMemorySigner`] required to sign `input`.
+ ///
+ /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+ pub channel_keys_id: [u8; 32],
+ /// The value in satoshis of the channel we're attempting to spend the anchor output of. This is
+ /// used along with `channel_keys_id` to re-derive the [`InMemorySigner`] required to sign
+ /// `input`.
+ ///
+ /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+ pub channel_value_satoshis: u64,
+ /// The transaction input's outpoint corresponding to the commitment transaction's anchor
+ /// output.
+ pub outpoint: OutPoint,
+}
+
+/// A descriptor used to sign for a commitment transaction's HTLC output.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct HTLCDescriptor {
+ /// A unique identifier used along with `channel_value_satoshis` to re-derive the
+ /// [`InMemorySigner`] required to sign `input`.
+ ///
+ /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+ pub channel_keys_id: [u8; 32],
+ /// The value in satoshis of the channel we're attempting to spend the HTLC output of. This is
+ /// used along with `channel_keys_id` to re-derive the [`InMemorySigner`] required to sign
+ /// `input`.
+ ///
+ /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+ pub channel_value_satoshis: u64,
+ /// The necessary channel parameters that need to be provided to the re-derived
+ /// [`InMemorySigner`] through [`ChannelSigner::provide_channel_parameters`].
+ ///
+ /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+ /// [`ChannelSigner::provide_channel_parameters`]: crate::chain::keysinterface::ChannelSigner::provide_channel_parameters
+ pub channel_parameters: ChannelTransactionParameters,
+ /// The txid of the commitment transaction in which the HTLC output lives.
+ pub commitment_txid: Txid,
+ /// The number of the commitment transaction in which the HTLC output lives.
+ pub per_commitment_number: u64,
+ /// The details of the HTLC as it appears in the commitment transaction.
+ pub htlc: HTLCOutputInCommitment,
+ /// The preimage, if `Some`, to claim the HTLC output with. If `None`, the timeout path must be
+ /// taken.
+ pub preimage: Option<PaymentPreimage>,
+ /// The counterparty's signature required to spend the HTLC output.
+ pub counterparty_sig: Signature
+}
+
+impl HTLCDescriptor {
+ /// Returns the unsigned transaction input spending the HTLC output in the commitment
+ /// transaction.
+ pub fn unsigned_tx_input(&self) -> TxIn {
+ chan_utils::build_htlc_input(&self.commitment_txid, &self.htlc, true /* opt_anchors */)
+ }
+
+ /// Returns the delayed output created as a result of spending the HTLC output in the commitment
+ /// transaction.
+ pub fn tx_output<C: secp256k1::Signing + secp256k1::Verification>(
+ &self, per_commitment_point: &PublicKey, secp: &Secp256k1<C>
+ ) -> TxOut {
+ let channel_params = self.channel_parameters.as_holder_broadcastable();
+ let broadcaster_keys = channel_params.broadcaster_pubkeys();
+ let counterparty_keys = channel_params.countersignatory_pubkeys();
+ let broadcaster_delayed_key = chan_utils::derive_public_key(
+ secp, per_commitment_point, &broadcaster_keys.delayed_payment_basepoint
+ );
+ let counterparty_revocation_key = chan_utils::derive_public_revocation_key(
+ secp, per_commitment_point, &counterparty_keys.revocation_basepoint
+ );
+ chan_utils::build_htlc_output(
+ 0 /* feerate_per_kw */, channel_params.contest_delay(), &self.htlc, true /* opt_anchors */,
+ false /* use_non_zero_fee_anchors */, &broadcaster_delayed_key, &counterparty_revocation_key
+ )
+ }
+
+ /// Returns the witness script of the HTLC output in the commitment transaction.
+ pub fn witness_script<C: secp256k1::Signing + secp256k1::Verification>(
+ &self, per_commitment_point: &PublicKey, secp: &Secp256k1<C>
+ ) -> Script {
+ let channel_params = self.channel_parameters.as_holder_broadcastable();
+ let broadcaster_keys = channel_params.broadcaster_pubkeys();
+ let counterparty_keys = channel_params.countersignatory_pubkeys();
+ let broadcaster_htlc_key = chan_utils::derive_public_key(
+ secp, per_commitment_point, &broadcaster_keys.htlc_basepoint
+ );
+ let counterparty_htlc_key = chan_utils::derive_public_key(
+ secp, per_commitment_point, &counterparty_keys.htlc_basepoint
+ );
+ let counterparty_revocation_key = chan_utils::derive_public_revocation_key(
+ secp, per_commitment_point, &counterparty_keys.revocation_basepoint
+ );
+ chan_utils::get_htlc_redeemscript_with_explicit_keys(
+ &self.htlc, true /* opt_anchors */, &broadcaster_htlc_key, &counterparty_htlc_key,
+ &counterparty_revocation_key,
+ )
+ }
+
+ /// Returns the fully signed witness required to spend the HTLC output in the commitment
+ /// transaction.
+ pub fn tx_input_witness(&self, signature: &Signature, witness_script: &Script) -> Witness {
+ chan_utils::build_htlc_input_witness(
+ signature, &self.counterparty_sig, &self.preimage, witness_script, true /* opt_anchors */
+ )
+ }
+}
+
+/// Represents the different types of transactions, originating from LDK, to be bumped.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum BumpTransactionEvent {
+ /// Indicates that a channel featuring anchor outputs is to be closed by broadcasting the local
+ /// commitment transaction. Since commitment transactions have a static feerate pre-agreed upon,
+ /// they may need additional fees to be attached through a child transaction using the popular
+ /// [Child-Pays-For-Parent](https://bitcoinops.org/en/topics/cpfp) fee bumping technique. This
+ /// child transaction must include the anchor input described within `anchor_descriptor` along
+ /// with additional inputs to meet the target feerate. Failure to meet the target feerate
+ /// decreases the confirmation odds of the transaction package (which includes the commitment
+ /// and child anchor transactions), possibly resulting in a loss of funds. Once the transaction
+ /// is constructed, it must be fully signed for and broadcast by the consumer of the event
+ /// along with the `commitment_tx` enclosed. Note that the `commitment_tx` must always be
+ /// broadcast first, as the child anchor transaction depends on it.
+ ///
+ /// The consumer should be able to sign for any of the additional inputs included within the
+ /// child anchor transaction. To sign its anchor input, an [`InMemorySigner`] should be
+ /// re-derived through [`KeysManager::derive_channel_keys`] with the help of
+ /// [`AnchorDescriptor::channel_keys_id`] and [`AnchorDescriptor::channel_value_satoshis`]. The
+ /// anchor input signature can be computed with [`EcdsaChannelSigner::sign_holder_anchor_input`],
+ /// which can then be provided to [`build_anchor_input_witness`] along with the `funding_pubkey`
+ /// to obtain the full witness required to spend.
+ ///
+ /// It is possible to receive more than one instance of this event if a valid child anchor
+ /// transaction is never broadcast or is but not with a sufficient fee to be mined. Care should
+ /// be taken by the consumer of the event to ensure any future iterations of the child anchor
+ /// transaction adhere to the [Replace-By-Fee
+ /// rules](https://github.com/bitcoin/bitcoin/blob/master/doc/policy/mempool-replacements.md)
+ /// for fee bumps to be accepted into the mempool, and eventually the chain. As the frequency of
+ /// these events is not user-controlled, users may ignore/drop the event if they are no longer
+ /// able to commit external confirmed funds to the child anchor transaction.
+ ///
+ /// The set of `pending_htlcs` on the commitment transaction to be broadcast can be inspected to
+ /// determine whether a significant portion of the channel's funds are allocated to HTLCs,
+ /// enabling users to make their own decisions regarding the importance of the commitment
+ /// transaction's confirmation. Note that this is not required, but simply exists as an option
+ /// for users to override LDK's behavior. On commitments with no HTLCs (indicated by those with
+ /// an empty `pending_htlcs`), confirmation of the commitment transaction can be considered to
+ /// be not urgent.
+ ///
+ /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+ /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys
+ /// [`EcdsaChannelSigner::sign_holder_anchor_input`]: crate::chain::keysinterface::EcdsaChannelSigner::sign_holder_anchor_input
+ /// [`build_anchor_input_witness`]: crate::ln::chan_utils::build_anchor_input_witness
+ ChannelClose {
+ /// The target feerate that the transaction package, which consists of the commitment
+ /// transaction and the to-be-crafted child anchor transaction, must meet.
+ package_target_feerate_sat_per_1000_weight: u32,
+ /// The channel's commitment transaction to bump the fee of. This transaction should be
+ /// broadcast along with the anchor transaction constructed as a result of consuming this
+ /// event.
+ commitment_tx: Transaction,
+ /// The absolute fee in satoshis of the commitment transaction. This can be used along with
+ /// the weight of the commitment transaction to determine its feerate.
+ commitment_tx_fee_satoshis: u64,
+ /// The descriptor to sign the anchor input of the anchor transaction constructed as a
+ /// result of consuming this event.
+ anchor_descriptor: AnchorDescriptor,
+ /// The set of pending HTLCs on the commitment transaction that need to be resolved once the
+ /// commitment transaction confirms.
+ pending_htlcs: Vec<HTLCOutputInCommitment>,
+ },
+ /// Indicates that a channel featuring anchor outputs has unilaterally closed on-chain by a
+ /// holder commitment transaction and its HTLC(s) need to be resolved on-chain. With the
+ /// zero-HTLC-transaction-fee variant of anchor outputs, the pre-signed HTLC
+ /// transactions have a zero fee, thus requiring additional inputs and/or outputs to be attached
+ /// for a timely confirmation within the chain. These additional inputs and/or outputs must be
+ /// appended to the resulting HTLC transaction to meet the target feerate. Failure to meet the
+ /// target feerate decreases the confirmation odds of the transaction, possibly resulting in a
+ /// loss of funds. Once the transaction meets the target feerate, it must be signed for and
+ /// broadcast by the consumer of the event.
+ ///
+ /// The consumer should be able to sign for any of the non-HTLC inputs added to the resulting
+ /// HTLC transaction. To sign HTLC inputs, an [`InMemorySigner`] should be re-derived through
+ /// [`KeysManager::derive_channel_keys`] with the help of `channel_keys_id` and
+ /// `channel_value_satoshis`. Each HTLC input's signature can be computed with
+ /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`], which can then be provided to
+ /// [`HTLCDescriptor::tx_input_witness`] to obtain the fully signed witness required to spend.
+ ///
+ /// It is possible to receive more than one instance of this event if a valid HTLC transaction
+ /// is never broadcast or is but not with a sufficient fee to be mined. Care should be taken by
+ /// the consumer of the event to ensure any future iterations of the HTLC transaction adhere to
+ /// the [Replace-By-Fee
+ /// rules](https://github.com/bitcoin/bitcoin/blob/master/doc/policy/mempool-replacements.md)
+ /// for fee bumps to be accepted into the mempool, and eventually the chain. As the frequency of
+ /// these events is not user-controlled, users may ignore/drop the event if either they are no
+ /// longer able to commit external confirmed funds to the HTLC transaction or the fee committed
+ /// to the HTLC transaction is greater in value than the HTLCs being claimed.
+ ///
+ /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+ /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys
+ /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::chain::keysinterface::EcdsaChannelSigner::sign_holder_htlc_transaction
+ /// [`HTLCDescriptor::tx_input_witness`]: HTLCDescriptor::tx_input_witness
+ HTLCResolution {
+ /// The target feerate that the resulting HTLC transaction must meet.
+ target_feerate_sat_per_1000_weight: u32,
+ /// The set of pending HTLCs on the confirmed commitment that need to be claimed, preferably
+ /// by the same transaction.
+ htlc_descriptors: Vec<HTLCDescriptor>,
+ /// The locktime required for the resulting HTLC transaction.
+ tx_lock_time: PackedLockTime,
+ },
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Events are returned from various bits in the library which indicate some action must be taken
+//! by the client.
+//!
+//! Because we don't have a built-in runtime, it's up to the client to call events at a time in the
+//! future, as well as generate and broadcast funding transactions, handle payment preimages, and a
+//! few other things.
+
+#[cfg(anchors)]
+pub mod bump_transaction;
+
+#[cfg(anchors)]
+pub use bump_transaction::BumpTransactionEvent;
+
+use crate::chain::keysinterface::SpendableOutputDescriptor;
+use crate::ln::channelmanager::{InterceptId, PaymentId};
+use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
+use crate::ln::features::ChannelTypeFeatures;
+use crate::ln::msgs;
+use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::routing::gossip::NetworkUpdate;
+use crate::util::errors::APIError;
+use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength};
+use crate::util::string::UntrustedString;
+use crate::routing::router::{RouteHop, RouteParameters};
+
+use bitcoin::{PackedLockTime, Transaction, OutPoint};
+#[cfg(anchors)]
+use bitcoin::{Txid, TxIn, TxOut, Witness};
+use bitcoin::blockdata::script::Script;
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::PublicKey;
+use crate::io;
+use crate::prelude::*;
+use core::time::Duration;
+use core::ops::Deref;
+use crate::sync::Arc;
+
+/// Some information provided on receipt of payment depends on whether the payment received is a
+/// spontaneous payment or a "conventional" lightning payment that's paying an invoice.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum PaymentPurpose {
+ /// Information for receiving a payment that we generated an invoice for.
+ InvoicePayment {
+ /// The preimage to the payment_hash, if the payment hash (and secret) were fetched via
+ /// [`ChannelManager::create_inbound_payment`]. If provided, this can be handed directly to
+ /// [`ChannelManager::claim_funds`].
+ ///
+ /// [`ChannelManager::create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment
+ /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
+ payment_preimage: Option<PaymentPreimage>,
+ /// The "payment secret". This authenticates the sender to the recipient, preventing a
+ /// number of deanonymization attacks during the routing process.
+ /// It is provided here for your reference, however its accuracy is enforced directly by
+ /// [`ChannelManager`] using the values you previously provided to
+ /// [`ChannelManager::create_inbound_payment`] or
+ /// [`ChannelManager::create_inbound_payment_for_hash`].
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ /// [`ChannelManager::create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment
+ /// [`ChannelManager::create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
+ payment_secret: PaymentSecret,
+ },
+ /// Because this is a spontaneous payment, the payer generated their own preimage rather than us
+ /// (the payee) providing a preimage.
+ SpontaneousPayment(PaymentPreimage),
+}
+
+impl_writeable_tlv_based_enum!(PaymentPurpose,
+ (0, InvoicePayment) => {
+ (0, payment_preimage, option),
+ (2, payment_secret, required),
+ };
+ (2, SpontaneousPayment)
+);
+
+/// When the payment path failure took place and extra details about it. [`PathFailure::OnPath`] may
+/// contain a [`NetworkUpdate`] that needs to be applied to the [`NetworkGraph`].
+///
+/// [`NetworkUpdate`]: crate::routing::gossip::NetworkUpdate
+/// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum PathFailure {
+ /// We failed to initially send the payment and no HTLC was committed to. Contains the relevant
+ /// error.
+ InitialSend {
+ /// The error surfaced from initial send.
+ err: APIError,
+ },
+ /// A hop on the path failed to forward our payment.
+ OnPath {
+ /// If present, this [`NetworkUpdate`] should be applied to the [`NetworkGraph`] so that routing
+ /// decisions can take into account the update.
+ ///
+ /// [`NetworkUpdate`]: crate::routing::gossip::NetworkUpdate
+ /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
+ network_update: Option<NetworkUpdate>,
+ },
+}
+
+impl_writeable_tlv_based_enum_upgradable!(PathFailure,
+ (0, OnPath) => {
+ (0, network_update, upgradable_option),
+ },
+ (2, InitialSend) => {
+ (0, err, upgradable_required),
+ },
+);
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+/// The reason the channel was closed. See individual variants for more details.
+pub enum ClosureReason {
+ /// Closure generated from receiving a peer error message.
+ ///
+ /// Our counterparty may have broadcasted their latest commitment state, and we have
+ /// as well.
+ CounterpartyForceClosed {
+ /// The error which the peer sent us.
+ ///
+ /// Be careful about printing the peer_msg, a well-crafted message could exploit
+ /// a security vulnerability in the terminal emulator or the logging subsystem.
+ /// To be safe, use `Display` on [`UntrustedString`].
+ ///
+ /// [`UntrustedString`]: crate::util::string::UntrustedString
+ peer_msg: UntrustedString,
+ },
+ /// Closure generated from [`ChannelManager::force_close_channel`], called by the user.
+ ///
+ /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+ HolderForceClosed,
+ /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+ /// the cooperative close transaction. Note the shutdown may have been initiated by us.
+ //TODO: split between CounterpartyInitiated/LocallyInitiated
+ CooperativeClosure,
+ /// A commitment transaction was confirmed on chain, closing the channel. Most likely this
+ /// commitment transaction came from our counterparty, but it may also have come from
+ /// a copy of our own `ChannelMonitor`.
+ CommitmentTxConfirmed,
+ /// The funding transaction failed to confirm in a timely manner on an inbound channel.
+ FundingTimedOut,
+ /// Closure generated from processing an event, likely a HTLC forward/relay/reception.
+ ProcessingError {
+ /// A developer-readable error message which we generated.
+ err: String,
+ },
+ /// The peer disconnected prior to funding completing. In this case the spec mandates that we
+ /// forget the channel entirely - we can attempt again if the peer reconnects.
+ ///
+ /// This includes cases where we restarted prior to funding completion, including prior to the
+ /// initial [`ChannelMonitor`] persistence completing.
+ ///
+ /// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
+ /// peer because of mutual incompatibility between us and our channel counterparty.
+ ///
+ /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
+ DisconnectedPeer,
+ /// Closure generated from `ChannelManager::read` if the [`ChannelMonitor`] is newer than
+ /// the [`ChannelManager`] deserialized.
+ ///
+ /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ OutdatedChannelManager
+}
+
+impl core::fmt::Display for ClosureReason {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
+ f.write_str("Channel closed because ")?;
+ match self {
+ ClosureReason::CounterpartyForceClosed { peer_msg } => {
+ f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
+ },
+ ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
+ ClosureReason::CooperativeClosure => f.write_str("the channel was cooperatively closed"),
+ ClosureReason::CommitmentTxConfirmed => f.write_str("commitment or closing transaction was confirmed on chain."),
+ ClosureReason::FundingTimedOut => write!(f, "funding transaction failed to confirm within {} blocks", FUNDING_CONF_DEADLINE_BLOCKS),
+ ClosureReason::ProcessingError { err } => {
+ f.write_str("of an exception: ")?;
+ f.write_str(&err)
+ },
+ ClosureReason::DisconnectedPeer => f.write_str("the peer disconnected prior to the channel being funded"),
+ ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
+ }
+ }
+}
+
+impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
+ (0, CounterpartyForceClosed) => { (1, peer_msg, required) },
+ (1, FundingTimedOut) => {},
+ (2, HolderForceClosed) => {},
+ (6, CommitmentTxConfirmed) => {},
+ (4, CooperativeClosure) => {},
+ (8, ProcessingError) => { (1, err, required) },
+ (10, DisconnectedPeer) => {},
+ (12, OutdatedChannelManager) => {},
+);
+
+/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum HTLCDestination {
+ /// We tried forwarding to a channel but failed to do so. An example of such an instance is when
+ /// there is insufficient capacity in our outbound channel.
+ NextHopChannel {
+ /// The `node_id` of the next node. For backwards compatibility, this field is
+ /// marked as optional, versions prior to 0.0.110 may not always be able to provide
+ /// counterparty node information.
+ node_id: Option<PublicKey>,
+ /// The outgoing `channel_id` between us and the next node.
+ channel_id: [u8; 32],
+ },
+ /// Scenario where we are unsure of the next node to forward the HTLC to.
+ UnknownNextHop {
+ /// Short channel id we are requesting to forward an HTLC to.
+ requested_forward_scid: u64,
+ },
+ /// We couldn't forward to the outgoing scid. An example would be attempting to send a duplicate
+ /// intercept HTLC.
+ InvalidForward {
+ /// Short channel id we are requesting to forward an HTLC to.
+ requested_forward_scid: u64
+ },
+ /// Failure scenario where an HTLC may have been forwarded to be intended for us,
+ /// but is invalid for some reason, so we reject it.
+ ///
+ /// Some of the reasons may include:
+ /// * HTLC Timeouts
+ /// * Expected MPP amount has already been reached
+ /// * Claimable amount does not match expected amount
+ FailedPayment {
+ /// The payment hash of the payment we attempted to process.
+ payment_hash: PaymentHash
+ },
+}
+
+impl_writeable_tlv_based_enum_upgradable!(HTLCDestination,
+ (0, NextHopChannel) => {
+ (0, node_id, required),
+ (2, channel_id, required),
+ },
+ (1, InvalidForward) => {
+ (0, requested_forward_scid, required),
+ },
+ (2, UnknownNextHop) => {
+ (0, requested_forward_scid, required),
+ },
+ (4, FailedPayment) => {
+ (0, payment_hash, required),
+ },
+);
+
+/// Will be used in [`Event::HTLCIntercepted`] to identify the next hop in the HTLC's path.
+/// Currently only used in serialization for the sake of maintaining compatibility. More variants
+/// will be added for general-purpose HTLC forward intercepts as well as trampoline forward
+/// intercepts in upcoming work.
+enum InterceptNextHop {
+ FakeScid {
+ requested_next_hop_scid: u64,
+ },
+}
+
+impl_writeable_tlv_based_enum!(InterceptNextHop,
+ (0, FakeScid) => {
+ (0, requested_next_hop_scid, required),
+ };
+);
+
+/// An Event which you should probably take some action in response to.
+///
+/// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
+/// them directly as they don't round-trip exactly (for example FundingGenerationReady is never
+/// written as it makes no sense to respond to it after reconnecting to peers).
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum Event {
+ /// Used to indicate that the client should generate a funding transaction with the given
+ /// parameters and then call [`ChannelManager::funding_transaction_generated`].
+ /// Generated in [`ChannelManager`] message handling.
+ /// Note that *all inputs* in the funding transaction must spend SegWit outputs or your
+ /// counterparty can steal your funds!
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
+ FundingGenerationReady {
+ /// The random channel_id we picked which you'll need to pass into
+ /// [`ChannelManager::funding_transaction_generated`].
+ ///
+ /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
+ temporary_channel_id: [u8; 32],
+ /// The counterparty's node_id, which you'll need to pass back into
+ /// [`ChannelManager::funding_transaction_generated`].
+ ///
+ /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
+ counterparty_node_id: PublicKey,
+ /// The value, in satoshis, that the output should have.
+ channel_value_satoshis: u64,
+ /// The script which should be used in the transaction output.
+ output_script: Script,
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`], or a
+ /// random value for an inbound channel. This may be zero for objects serialized with LDK
+ /// versions prior to 0.0.113.
+ ///
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ user_channel_id: u128,
+ },
+ /// Indicates that we've been offered a payment and it needs to be claimed via calling
+ /// [`ChannelManager::claim_funds`] with the preimage given in [`PaymentPurpose`].
+ ///
+ /// Note that if the preimage is not known, you should call
+ /// [`ChannelManager::fail_htlc_backwards`] or [`ChannelManager::fail_htlc_backwards_with_reason`]
+ /// to free up resources for this HTLC and avoid network congestion.
+ /// If you fail to call either [`ChannelManager::claim_funds`], [`ChannelManager::fail_htlc_backwards`],
+ /// or [`ChannelManager::fail_htlc_backwards_with_reason`] within the HTLC's timeout, the HTLC will be
+ /// automatically failed.
+ ///
+ /// # Note
+ /// LDK will not stop an inbound payment from being paid multiple times, so multiple
+ /// `PaymentClaimable` events may be generated for the same payment.
+ ///
+ /// # Note
+ /// This event used to be called `PaymentReceived` in LDK versions 0.0.112 and earlier.
+ ///
+ /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
+ /// [`ChannelManager::fail_htlc_backwards`]: crate::ln::channelmanager::ChannelManager::fail_htlc_backwards
+ /// [`ChannelManager::fail_htlc_backwards_with_reason`]: crate::ln::channelmanager::ChannelManager::fail_htlc_backwards_with_reason
+ PaymentClaimable {
+ /// The node that will receive the payment after it has been claimed.
+ /// This is useful to identify payments received via [phantom nodes].
+ /// This field will always be filled in when the event was generated by LDK versions
+ /// 0.0.113 and above.
+ ///
+ /// [phantom nodes]: crate::chain::keysinterface::PhantomKeysManager
+ receiver_node_id: Option<PublicKey>,
+ /// The hash for which the preimage should be handed to the ChannelManager. Note that LDK will
+ /// not stop you from registering duplicate payment hashes for inbound payments.
+ payment_hash: PaymentHash,
+ /// The value, in thousandths of a satoshi, that this payment is for.
+ amount_msat: u64,
+ /// Information for claiming this received payment, based on whether the purpose of the
+ /// payment is to pay an invoice or to send a spontaneous payment.
+ purpose: PaymentPurpose,
+ /// The `channel_id` indicating over which channel we received the payment.
+ via_channel_id: Option<[u8; 32]>,
+ /// The `user_channel_id` indicating over which channel we received the payment.
+ via_user_channel_id: Option<u128>,
+ },
+ /// Indicates a payment has been claimed and we've received money!
+ ///
+ /// This most likely occurs when [`ChannelManager::claim_funds`] has been called in response
+ /// to an [`Event::PaymentClaimable`]. However, if we previously crashed during a
+ /// [`ChannelManager::claim_funds`] call you may see this event without a corresponding
+ /// [`Event::PaymentClaimable`] event.
+ ///
+ /// # Note
+ /// LDK will not stop an inbound payment from being paid multiple times, so multiple
+ /// `PaymentClaimable` events may be generated for the same payment. If you then call
+ /// [`ChannelManager::claim_funds`] twice for the same [`Event::PaymentClaimable`] you may get
+ /// multiple `PaymentClaimed` events.
+ ///
+ /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
+ PaymentClaimed {
+ /// The node that received the payment.
+ /// This is useful to identify payments which were received via [phantom nodes].
+ /// This field will always be filled in when the event was generated by LDK versions
+ /// 0.0.113 and above.
+ ///
+ /// [phantom nodes]: crate::chain::keysinterface::PhantomKeysManager
+ receiver_node_id: Option<PublicKey>,
+ /// The payment hash of the claimed payment. Note that LDK will not stop you from
+ /// registering duplicate payment hashes for inbound payments.
+ payment_hash: PaymentHash,
+ /// The value, in thousandths of a satoshi, that this payment is for.
+ amount_msat: u64,
+ /// The purpose of the claimed payment, i.e. whether the payment was for an invoice or a
+ /// spontaneous payment.
+ purpose: PaymentPurpose,
+ },
+ /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
+ /// and we got back the payment preimage for it).
+ ///
+ /// Note for MPP payments: in rare cases, this event may be preceded by a `PaymentPathFailed`
+ /// event. In this situation, you SHOULD treat this payment as having succeeded.
+ PaymentSent {
+ /// The id returned by [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_id: Option<PaymentId>,
+ /// The preimage to the hash given to ChannelManager::send_payment.
+ /// Note that this serves as a payment receipt, if you wish to have such a thing, you must
+ /// store it somehow!
+ payment_preimage: PaymentPreimage,
+ /// The hash that was given to [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_hash: PaymentHash,
+ /// The total fee which was spent at intermediate hops in this payment, across all paths.
+ ///
+ /// Note that, like [`Route::get_total_fees`] this does *not* include any potential
+ /// overpayment to the recipient node.
+ ///
+ /// If the recipient or an intermediate node misbehaves and gives us free money, this may
+ /// overstate the amount paid, though this is unlikely.
+ ///
+ /// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees
+ fee_paid_msat: Option<u64>,
+ },
+ /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
+ /// provide failure information for each path attempt in the payment, including retries.
+ ///
+ /// This event is provided once there are no further pending HTLCs for the payment and the
+ /// payment is no longer retryable, due either to the [`Retry`] provided or
+ /// [`ChannelManager::abandon_payment`] having been called for the corresponding payment.
+ ///
+ /// [`Retry`]: crate::ln::channelmanager::Retry
+ /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ PaymentFailed {
+ /// The id returned by [`ChannelManager::send_payment`] and used with
+ /// [`ChannelManager::abandon_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ payment_id: PaymentId,
+ /// The hash that was given to [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_hash: PaymentHash,
+ },
+ /// Indicates that a path for an outbound payment was successful.
+ ///
+ /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
+ /// [`Event::PaymentSent`] for obtaining the payment preimage.
+ PaymentPathSuccessful {
+ /// The id returned by [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_id: PaymentId,
+ /// The hash that was given to [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_hash: Option<PaymentHash>,
+ /// The payment path that was successful.
+ ///
+ /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
+ path: Vec<RouteHop>,
+ },
+ /// Indicates an outbound HTLC we sent failed, likely due to an intermediary node being unable to
+ /// handle the HTLC.
+ ///
+ /// Note that this does *not* indicate that all paths for an MPP payment have failed, see
+ /// [`Event::PaymentFailed`].
+ ///
+ /// See [`ChannelManager::abandon_payment`] for giving up on this payment before its retries have
+ /// been exhausted.
+ ///
+ /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ PaymentPathFailed {
+ /// The id returned by [`ChannelManager::send_payment`] and used with
+ /// [`ChannelManager::abandon_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ payment_id: Option<PaymentId>,
+ /// The hash that was given to [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_hash: PaymentHash,
+ /// Indicates the payment was rejected for some reason by the recipient. This implies that
+ /// the payment has failed, not just the route in question. If this is not set, the payment may
+ /// be retried via a different route.
+ payment_failed_permanently: bool,
+ /// Extra error details based on the failure type. May contain an update that needs to be
+ /// applied to the [`NetworkGraph`].
+ ///
+ /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
+ failure: PathFailure,
+ /// The payment path that failed.
+ path: Vec<RouteHop>,
+ /// The channel responsible for the failed payment path.
+ ///
+ /// Note that for route hints or for the first hop in a path this may be an SCID alias and
+ /// may not refer to a channel in the public network graph. These aliases may also collide
+ /// with channels in the public network graph.
+ ///
+ /// If this is `Some`, then the corresponding channel should be avoided when the payment is
+ /// retried. May be `None` for older [`Event`] serializations.
+ short_channel_id: Option<u64>,
+#[cfg(test)]
+ error_code: Option<u16>,
+#[cfg(test)]
+ error_data: Option<Vec<u8>>,
+ },
+ /// Indicates that a probe payment we sent succeeded, i.e., only failed at the destination.
+ ProbeSuccessful {
+ /// The id returned by [`ChannelManager::send_probe`].
+ ///
+ /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
+ payment_id: PaymentId,
+ /// The hash generated by [`ChannelManager::send_probe`].
+ ///
+ /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
+ payment_hash: PaymentHash,
+ /// The payment path that was successful.
+ path: Vec<RouteHop>,
+ },
+ /// Indicates that a probe payment we sent failed at an intermediary node on the path.
+ ProbeFailed {
+ /// The id returned by [`ChannelManager::send_probe`].
+ ///
+ /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
+ payment_id: PaymentId,
+ /// The hash generated by [`ChannelManager::send_probe`].
+ ///
+ /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
+ payment_hash: PaymentHash,
+ /// The payment path that failed.
+ path: Vec<RouteHop>,
+ /// The channel responsible for the failed probe.
+ ///
+ /// Note that for route hints or for the first hop in a path this may be an SCID alias and
+ /// may not refer to a channel in the public network graph. These aliases may also collide
+ /// with channels in the public network graph.
+ short_channel_id: Option<u64>,
+ },
+ /// Used to indicate that [`ChannelManager::process_pending_htlc_forwards`] should be called at
+ /// a time in the future.
+ ///
+ /// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
+ PendingHTLCsForwardable {
+ /// The minimum amount of time that should be waited prior to calling
+ /// process_pending_htlc_forwards. To increase the effort required to correlate payments,
+ /// you should wait a random amount of time in roughly the range (now + time_forwardable,
+ /// now + 5*time_forwardable).
+ time_forwardable: Duration,
+ },
+ /// Used to indicate that we've intercepted an HTLC forward. This event will only be generated if
+ /// you've encoded an intercept scid in the receiver's invoice route hints using
+ /// [`ChannelManager::get_intercept_scid`] and have set [`UserConfig::accept_intercept_htlcs`].
+ ///
+ /// [`ChannelManager::forward_intercepted_htlc`] or
+ /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event. See
+ /// their docs for more information.
+ ///
+ /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid
+ /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
+ /// [`ChannelManager::forward_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::forward_intercepted_htlc
+ /// [`ChannelManager::fail_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::fail_intercepted_htlc
+ HTLCIntercepted {
+ /// An id to help LDK identify which HTLC is being forwarded or failed.
+ intercept_id: InterceptId,
+ /// The fake scid that was programmed as the next hop's scid, generated using
+ /// [`ChannelManager::get_intercept_scid`].
+ ///
+ /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid
+ requested_next_hop_scid: u64,
+ /// The payment hash used for this HTLC.
+ payment_hash: PaymentHash,
+ /// How many msats were received on the inbound edge of this HTLC.
+ inbound_amount_msat: u64,
+ /// How many msats the payer intended to route to the next node. Depending on the reason you are
+ /// intercepting this payment, you might take a fee by forwarding less than this amount.
+ ///
+ /// Note that LDK will NOT check that expected fees were factored into this value. You MUST
+ /// check that whatever fee you want has been included here or subtract it as required. Further,
+ /// LDK will not stop you from forwarding more than you received.
+ expected_outbound_amount_msat: u64,
+ },
+ /// Used to indicate that an output which you should know how to spend was confirmed on chain
+ /// and is now spendable.
+ /// Such outputs will *not* ever be spent by rust-lightning, and are not at risk of your
+ /// counterparty spending them due to some kind of timeout. Thus, you need to store them
+ /// somewhere and spend them when you create on-chain transactions.
+ SpendableOutputs {
+ /// The outputs which you should store as spendable by you.
+ outputs: Vec<SpendableOutputDescriptor>,
+ },
+ /// This event is generated when a payment has been successfully forwarded through us and a
+ /// forwarding fee earned.
+ PaymentForwarded {
+ /// The incoming channel between the previous node and us. This is only `None` for events
+ /// generated or serialized by versions prior to 0.0.107.
+ prev_channel_id: Option<[u8; 32]>,
+ /// The outgoing channel between the next node and us. This is only `None` for events
+ /// generated or serialized by versions prior to 0.0.107.
+ next_channel_id: Option<[u8; 32]>,
+ /// The fee, in milli-satoshis, which was earned as a result of the payment.
+ ///
+ /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
+ /// was pending, the amount the next hop claimed will have been rounded down to the nearest
+ /// whole satoshi. Thus, the fee calculated here may be higher than expected as we still
+ /// claimed the full value in millisatoshis from the source. In this case,
+ /// `claim_from_onchain_tx` will be set.
+ ///
+ /// If the channel which sent us the payment has been force-closed, we will claim the funds
+ /// via an on-chain transaction. In that case we do not yet know the on-chain transaction
+ /// fees which we will spend and will instead set this to `None`. It is possible duplicate
+ /// `PaymentForwarded` events are generated for the same payment iff `fee_earned_msat` is
+ /// `None`.
+ fee_earned_msat: Option<u64>,
+ /// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
+ /// transaction.
+ claim_from_onchain_tx: bool,
+ /// The final amount forwarded, in milli-satoshis, after the fee is deducted.
+ ///
+ /// The caveat described above the `fee_earned_msat` field applies here as well.
+ outbound_amount_forwarded_msat: Option<u64>,
+ },
+ /// Used to indicate that a channel with the given `channel_id` is being opened and pending
+ /// confirmation on-chain.
+ ///
+ /// This event is emitted when the funding transaction has been signed and is broadcast to the
+ /// network. For 0conf channels it will be immediately followed by the corresponding
+ /// [`Event::ChannelReady`] event.
+ ChannelPending {
+ /// The `channel_id` of the channel that is pending confirmation.
+ channel_id: [u8; 32],
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
+ /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
+ /// `user_channel_id` will be randomized for an inbound channel.
+ ///
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ user_channel_id: u128,
+ /// The `temporary_channel_id` this channel used to be known by during channel establishment.
+ ///
+ /// Will be `None` for channels created prior to LDK version 0.0.115.
+ former_temporary_channel_id: Option<[u8; 32]>,
+ /// The `node_id` of the channel counterparty.
+ counterparty_node_id: PublicKey,
+ /// The outpoint of the channel's funding transaction.
+ funding_txo: OutPoint,
+ },
+ /// Used to indicate that a channel with the given `channel_id` is ready to
+ /// be used. This event is emitted either when the funding transaction has been confirmed
+ /// on-chain, or, in case of a 0conf channel, when both parties have confirmed the channel
+ /// establishment.
+ ChannelReady {
+ /// The `channel_id` of the channel that is ready.
+ channel_id: [u8; 32],
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
+ /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
+ /// `user_channel_id` will be randomized for an inbound channel.
+ ///
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ user_channel_id: u128,
+ /// The `node_id` of the channel counterparty.
+ counterparty_node_id: PublicKey,
+ /// The features that this channel will operate with.
+ channel_type: ChannelTypeFeatures,
+ },
+ /// Used to indicate that a previously opened channel with the given `channel_id` is in the
+ /// process of closure.
+ ChannelClosed {
+ /// The `channel_id` of the channel which has been closed. Note that on-chain transactions
+ /// resolving the channel are likely still awaiting confirmation.
+ channel_id: [u8; 32],
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
+ /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
+ /// `user_channel_id` will be randomized for inbound channels.
+ /// This may be zero for inbound channels serialized prior to 0.0.113 and will always be
+ /// zero for objects serialized with LDK versions prior to 0.0.102.
+ ///
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ user_channel_id: u128,
+ /// The reason the channel was closed.
+ reason: ClosureReason
+ },
+ /// Used to indicate to the user that they can abandon the funding transaction and recycle the
+ /// inputs for another purpose.
+ DiscardFunding {
+ /// The channel_id of the channel which has been closed.
+ channel_id: [u8; 32],
+ /// The full transaction received from the user
+ transaction: Transaction
+ },
+ /// Indicates a request to open a new channel by a peer.
+ ///
+ /// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
+ /// request, call [`ChannelManager::force_close_without_broadcasting_txn`].
+ ///
+ /// The event is only triggered when a new open channel request is received and the
+ /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
+ ///
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ OpenChannelRequest {
+ /// The temporary channel ID of the channel requested to be opened.
+ ///
+ /// When responding to the request, the `temporary_channel_id` should be passed
+ /// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept,
+ /// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject.
+ ///
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
+ temporary_channel_id: [u8; 32],
+ /// The node_id of the counterparty requesting to open the channel.
+ ///
+ /// When responding to the request, the `counterparty_node_id` should be passed
+ /// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to
+ /// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the
+ /// request.
+ ///
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
+ counterparty_node_id: PublicKey,
+ /// The channel value of the requested channel.
+ funding_satoshis: u64,
+ /// Our starting balance in the channel if the request is accepted, in milli-satoshi.
+ push_msat: u64,
+ /// The features that this channel will operate with. If you reject the channel, a
+ /// well-behaved counterparty may automatically re-attempt the channel with a new set of
+ /// feature flags.
+ ///
+ /// Note that if [`ChannelTypeFeatures::supports_scid_privacy`] returns true on this type,
+ /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to
+ /// 0.0.106.
+ ///
+ /// Furthermore, note that if [`ChannelTypeFeatures::supports_zero_conf`] returns true on this type,
+ /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to
+ /// 0.0.107. Channels setting this type also need to get manually accepted via
+ /// [`crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`],
+ /// or will be rejected otherwise.
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ channel_type: ChannelTypeFeatures,
+ },
+ /// Indicates that the HTLC was accepted, but could not be processed when or after attempting to
+ /// forward it.
+ ///
+ /// Some scenarios where this event may be sent include:
+ /// * Insufficient capacity in the outbound channel
+ /// * While waiting to forward the HTLC, the channel it is meant to be forwarded through closes
+ /// * When an unknown SCID is requested for forwarding a payment.
+ /// * Expected MPP amount has already been reached
+ /// * The HTLC has timed out
+ ///
+ /// This event, however, does not get generated if an HTLC fails to meet the forwarding
+ /// requirements (i.e. insufficient fees paid, or a CLTV that is too soon).
+ HTLCHandlingFailed {
+ /// The channel over which the HTLC was received.
+ prev_channel_id: [u8; 32],
+ /// Destination of the HTLC that failed to be processed.
+ failed_next_destination: HTLCDestination,
+ },
+ #[cfg(anchors)]
+ /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event
+ /// requires confirmed external funds to be readily available to spend.
+ ///
+ /// LDK does not currently generate this event. It is limited to the scope of channels with
+ /// anchor outputs, which will be introduced in a future release.
+ BumpTransaction(BumpTransactionEvent),
+}
+
+// Serializes each event as a one-byte type tag, generally followed by a TLV field
+// stream (see the compatibility note at the end of the `match` below).
+impl Writeable for Event {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ match self {
+ &Event::FundingGenerationReady { .. } => {
+ 0u8.write(writer)?;
+ // We never write out FundingGenerationReady events as, upon disconnection, peers
+ // drop any channels which have not yet exchanged funding_signed.
+ },
+ &Event::PaymentClaimable { ref payment_hash, ref amount_msat, ref purpose, ref receiver_node_id, ref via_channel_id, ref via_user_channel_id } => {
+ 1u8.write(writer)?;
+ // Flatten the PaymentPurpose into separate optional secret/preimage TLV fields.
+ let mut payment_secret = None;
+ let payment_preimage;
+ match &purpose {
+ PaymentPurpose::InvoicePayment { payment_preimage: preimage, payment_secret: secret } => {
+ payment_secret = Some(secret);
+ payment_preimage = *preimage;
+ },
+ PaymentPurpose::SpontaneousPayment(preimage) => {
+ payment_preimage = Some(*preimage);
+ }
+ }
+ write_tlv_fields!(writer, {
+ (0, payment_hash, required),
+ (1, receiver_node_id, option),
+ (2, payment_secret, option),
+ (3, via_channel_id, option),
+ (4, amount_msat, required),
+ (5, via_user_channel_id, option),
+ (6, 0u64, required), // user_payment_id required for compatibility with 0.0.103 and earlier
+ (8, payment_preimage, option),
+ });
+ },
+ &Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => {
+ 2u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_preimage, required),
+ (1, payment_hash, required),
+ (3, payment_id, option),
+ (5, fee_paid_msat, option),
+ });
+ },
+ &Event::PaymentPathFailed {
+ ref payment_id, ref payment_hash, ref payment_failed_permanently, ref failure,
+ ref path, ref short_channel_id,
+ #[cfg(test)]
+ ref error_code,
+ #[cfg(test)]
+ ref error_data,
+ } => {
+ 3u8.write(writer)?;
+ // In test builds the raw error code/data are written ahead of the TLV stream.
+ #[cfg(test)]
+ error_code.write(writer)?;
+ #[cfg(test)]
+ error_data.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_hash, required),
+ (1, None::<NetworkUpdate>, option), // network_update in LDK versions prior to 0.0.114
+ (2, payment_failed_permanently, required),
+ (3, false, required), // all_paths_failed in LDK versions prior to 0.0.114
+ (5, *path, vec_type),
+ (7, short_channel_id, option),
+ (9, None::<RouteParameters>, option), // retry in LDK versions prior to 0.0.115
+ (11, payment_id, option),
+ (13, failure, required),
+ });
+ },
+ &Event::PendingHTLCsForwardable { time_forwardable: _ } => {
+ 4u8.write(writer)?;
+ // Note that we now ignore these on the read end as we'll re-generate them in
+ // ChannelManager, we write them here only for backwards compatibility.
+ },
+ &Event::SpendableOutputs { ref outputs } => {
+ 5u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, WithoutLength(outputs), required),
+ });
+ },
+ &Event::HTLCIntercepted { requested_next_hop_scid, payment_hash, inbound_amount_msat, expected_outbound_amount_msat, intercept_id } => {
+ 6u8.write(writer)?;
+ // Wrap the raw scid in InterceptNextHop before writing.
+ let intercept_scid = InterceptNextHop::FakeScid { requested_next_hop_scid };
+ write_tlv_fields!(writer, {
+ (0, intercept_id, required),
+ (2, intercept_scid, required),
+ (4, payment_hash, required),
+ (6, inbound_amount_msat, required),
+ (8, expected_outbound_amount_msat, required),
+ });
+ }
+ &Event::PaymentForwarded {
+ fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+ next_channel_id, outbound_amount_forwarded_msat
+ } => {
+ 7u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, fee_earned_msat, option),
+ (1, prev_channel_id, option),
+ (2, claim_from_onchain_tx, required),
+ (3, next_channel_id, option),
+ (5, outbound_amount_forwarded_msat, option),
+ });
+ },
+ &Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason } => {
+ 9u8.write(writer)?;
+ // `user_channel_id` used to be a single u64 value. In order to remain backwards
+ // compatible with versions prior to 0.0.113, the u128 is serialized as two
+ // separate u64 values.
+ let user_channel_id_low = *user_channel_id as u64;
+ let user_channel_id_high = (*user_channel_id >> 64) as u64;
+ write_tlv_fields!(writer, {
+ (0, channel_id, required),
+ (1, user_channel_id_low, required),
+ (2, reason, required),
+ (3, user_channel_id_high, required),
+ });
+ },
+ &Event::DiscardFunding { ref channel_id, ref transaction } => {
+ 11u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, channel_id, required),
+ (2, transaction, required)
+ })
+ },
+ &Event::PaymentPathSuccessful { ref payment_id, ref payment_hash, ref path } => {
+ 13u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_id, required),
+ (2, payment_hash, option),
+ (4, *path, vec_type)
+ })
+ },
+ &Event::PaymentFailed { ref payment_id, ref payment_hash } => {
+ 15u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_id, required),
+ (2, payment_hash, required),
+ })
+ },
+ &Event::OpenChannelRequest { .. } => {
+ 17u8.write(writer)?;
+ // We never write the OpenChannelRequest events as, upon disconnection, peers
+ // drop any channels which have not yet exchanged funding_signed.
+ },
+ &Event::PaymentClaimed { ref payment_hash, ref amount_msat, ref purpose, ref receiver_node_id } => {
+ 19u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_hash, required),
+ (1, receiver_node_id, option),
+ (2, purpose, required),
+ (4, amount_msat, required),
+ });
+ },
+ &Event::ProbeSuccessful { ref payment_id, ref payment_hash, ref path } => {
+ 21u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_id, required),
+ (2, payment_hash, required),
+ (4, *path, vec_type)
+ })
+ },
+ &Event::ProbeFailed { ref payment_id, ref payment_hash, ref path, ref short_channel_id } => {
+ 23u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_id, required),
+ (2, payment_hash, required),
+ (4, *path, vec_type),
+ (6, short_channel_id, option),
+ })
+ },
+ &Event::HTLCHandlingFailed { ref prev_channel_id, ref failed_next_destination } => {
+ 25u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, prev_channel_id, required),
+ (2, failed_next_destination, required),
+ })
+ },
+ #[cfg(anchors)]
+ &Event::BumpTransaction(ref event)=> {
+ 27u8.write(writer)?;
+ match event {
+ // We never write the ChannelClose|HTLCResolution events as they'll be replayed
+ // upon restarting anyway if they remain unresolved.
+ BumpTransactionEvent::ChannelClose { .. } => {}
+ BumpTransactionEvent::HTLCResolution { .. } => {}
+ }
+ write_tlv_fields!(writer, {}); // Write a length field for forwards compat
+ }
+ &Event::ChannelReady { ref channel_id, ref user_channel_id, ref counterparty_node_id, ref channel_type } => {
+ 29u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, channel_id, required),
+ (2, user_channel_id, required),
+ (4, counterparty_node_id, required),
+ (6, channel_type, required),
+ });
+ },
+ &Event::ChannelPending { ref channel_id, ref user_channel_id, ref former_temporary_channel_id, ref counterparty_node_id, ref funding_txo } => {
+ 31u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, channel_id, required),
+ (2, user_channel_id, required),
+ (4, former_temporary_channel_id, required),
+ (6, counterparty_node_id, required),
+ (8, funding_txo, required),
+ });
+ },
+ // Note that, going forward, all new events must only write data inside of
+ // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
+ // data via `write_tlv_fields`.
+ }
+ Ok(())
+ }
+}
+impl MaybeReadable for Event {
+ // Deserializes one event. `Ok(None)` is returned both for event types that are
+ // deliberately not persisted and for unknown odd-numbered (ignorable) event types.
+ fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
+ match Readable::read(reader)? {
+ // Each type number below must match the u8 tag written by `Writeable for Event`.
+ // Note that we do not write a length-prefixed TLV for FundingGenerationReady events,
+ // unlike all other events, thus we return immediately here.
+ 0u8 => Ok(None),
+ 1u8 => {
+ // The closure keeps `read_tlv_fields!`'s early error returns local to this arm.
+ let f = || {
+ let mut payment_hash = PaymentHash([0; 32]);
+ let mut payment_preimage = None;
+ let mut payment_secret = None;
+ let mut amount_msat = 0;
+ let mut receiver_node_id = None;
+ let mut _user_payment_id = None::<u64>; // For compatibility with 0.0.103 and earlier
+ let mut via_channel_id = None;
+ let mut via_user_channel_id = None;
+ read_tlv_fields!(reader, {
+ (0, payment_hash, required),
+ (1, receiver_node_id, option),
+ (2, payment_secret, option),
+ (3, via_channel_id, option),
+ (4, amount_msat, required),
+ (5, via_user_channel_id, option),
+ (6, _user_payment_id, option),
+ (8, payment_preimage, option),
+ });
+ // A payment_secret implies an invoice-based payment; a bare preimage implies a
+ // spontaneous payment. Neither being present is invalid.
+ let purpose = match payment_secret {
+ Some(secret) => PaymentPurpose::InvoicePayment {
+ payment_preimage,
+ payment_secret: secret
+ },
+ None if payment_preimage.is_some() => PaymentPurpose::SpontaneousPayment(payment_preimage.unwrap()),
+ None => return Err(msgs::DecodeError::InvalidValue),
+ };
+ Ok(Some(Event::PaymentClaimable {
+ receiver_node_id,
+ payment_hash,
+ amount_msat,
+ purpose,
+ via_channel_id,
+ via_user_channel_id,
+ }))
+ };
+ f()
+ },
+ 2u8 => {
+ let f = || {
+ let mut payment_preimage = PaymentPreimage([0; 32]);
+ let mut payment_hash = None;
+ let mut payment_id = None;
+ let mut fee_paid_msat = None;
+ read_tlv_fields!(reader, {
+ (0, payment_preimage, required),
+ (1, payment_hash, option),
+ (3, payment_id, option),
+ (5, fee_paid_msat, option),
+ });
+ // If the hash was not serialized (older versions), reconstruct it as the
+ // SHA256 of the preimage.
+ if payment_hash.is_none() {
+ payment_hash = Some(PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()));
+ }
+ Ok(Some(Event::PaymentSent {
+ payment_id,
+ payment_preimage,
+ payment_hash: payment_hash.unwrap(),
+ fee_paid_msat,
+ }))
+ };
+ f()
+ },
+ 3u8 => {
+ let f = || {
+ // error_code/error_data are only (de)serialized in test builds.
+ #[cfg(test)]
+ let error_code = Readable::read(reader)?;
+ #[cfg(test)]
+ let error_data = Readable::read(reader)?;
+ let mut payment_hash = PaymentHash([0; 32]);
+ let mut payment_failed_permanently = false;
+ let mut network_update = None;
+ let mut path: Option<Vec<RouteHop>> = Some(vec![]);
+ let mut short_channel_id = None;
+ let mut payment_id = None;
+ let mut failure_opt = None;
+ read_tlv_fields!(reader, {
+ (0, payment_hash, required),
+ (1, network_update, upgradable_option),
+ (2, payment_failed_permanently, required),
+ (5, path, vec_type),
+ (7, short_channel_id, option),
+ (11, payment_id, option),
+ (13, failure_opt, upgradable_option),
+ });
+ // Older serializations only wrote `network_update`; fall back to wrapping it in
+ // a `PathFailure::OnPath` when no explicit failure field was written.
+ let failure = failure_opt.unwrap_or_else(|| PathFailure::OnPath { network_update });
+ Ok(Some(Event::PaymentPathFailed {
+ payment_id,
+ payment_hash,
+ payment_failed_permanently,
+ failure,
+ path: path.unwrap(),
+ short_channel_id,
+ #[cfg(test)]
+ error_code,
+ #[cfg(test)]
+ error_data,
+ }))
+ };
+ f()
+ },
+ 4u8 => Ok(None),
+ 5u8 => {
+ let f = || {
+ let mut outputs = WithoutLength(Vec::new());
+ read_tlv_fields!(reader, {
+ (0, outputs, required),
+ });
+ Ok(Some(Event::SpendableOutputs { outputs: outputs.0 }))
+ };
+ f()
+ },
+ 6u8 => {
+ let mut payment_hash = PaymentHash([0; 32]);
+ let mut intercept_id = InterceptId([0; 32]);
+ let mut requested_next_hop_scid = InterceptNextHop::FakeScid { requested_next_hop_scid: 0 };
+ let mut inbound_amount_msat = 0;
+ let mut expected_outbound_amount_msat = 0;
+ read_tlv_fields!(reader, {
+ (0, intercept_id, required),
+ (2, requested_next_hop_scid, required),
+ (4, payment_hash, required),
+ (6, inbound_amount_msat, required),
+ (8, expected_outbound_amount_msat, required),
+ });
+ // Unwrap the (currently single-variant) enum wrapper into the bare scid.
+ let next_scid = match requested_next_hop_scid {
+ InterceptNextHop::FakeScid { requested_next_hop_scid: scid } => scid
+ };
+ Ok(Some(Event::HTLCIntercepted {
+ payment_hash,
+ requested_next_hop_scid: next_scid,
+ inbound_amount_msat,
+ expected_outbound_amount_msat,
+ intercept_id,
+ }))
+ },
+ 7u8 => {
+ let f = || {
+ let mut fee_earned_msat = None;
+ let mut prev_channel_id = None;
+ let mut claim_from_onchain_tx = false;
+ let mut next_channel_id = None;
+ let mut outbound_amount_forwarded_msat = None;
+ read_tlv_fields!(reader, {
+ (0, fee_earned_msat, option),
+ (1, prev_channel_id, option),
+ (2, claim_from_onchain_tx, required),
+ (3, next_channel_id, option),
+ (5, outbound_amount_forwarded_msat, option),
+ });
+ Ok(Some(Event::PaymentForwarded {
+ fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
+ outbound_amount_forwarded_msat
+ }))
+ };
+ f()
+ },
+ 9u8 => {
+ let f = || {
+ let mut channel_id = [0; 32];
+ let mut reason = UpgradableRequired(None);
+ let mut user_channel_id_low_opt: Option<u64> = None;
+ let mut user_channel_id_high_opt: Option<u64> = None;
+ read_tlv_fields!(reader, {
+ (0, channel_id, required),
+ (1, user_channel_id_low_opt, option),
+ (2, reason, upgradable_required),
+ (3, user_channel_id_high_opt, option),
+ });
+
+ // `user_channel_id` used to be a single u64 value. In order to remain
+ // backwards compatible with versions prior to 0.0.113, the u128 is serialized
+ // as two separate u64 values.
+ let user_channel_id = (user_channel_id_low_opt.unwrap_or(0) as u128) +
+ ((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);
+
+ Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required) }))
+ };
+ f()
+ },
+ 11u8 => {
+ let f = || {
+ let mut channel_id = [0; 32];
+ let mut transaction = Transaction{ version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
+ read_tlv_fields!(reader, {
+ (0, channel_id, required),
+ (2, transaction, required),
+ });
+ Ok(Some(Event::DiscardFunding { channel_id, transaction } ))
+ };
+ f()
+ },
+ 13u8 => {
+ let f = || {
+ let mut payment_id = PaymentId([0; 32]);
+ let mut payment_hash = None;
+ let mut path: Option<Vec<RouteHop>> = Some(vec![]);
+ read_tlv_fields!(reader, {
+ (0, payment_id, required),
+ (2, payment_hash, option),
+ (4, path, vec_type),
+ });
+ Ok(Some(Event::PaymentPathSuccessful {
+ payment_id,
+ payment_hash,
+ path: path.unwrap(),
+ }))
+ };
+ f()
+ },
+ 15u8 => {
+ let f = || {
+ let mut payment_hash = PaymentHash([0; 32]);
+ let mut payment_id = PaymentId([0; 32]);
+ read_tlv_fields!(reader, {
+ (0, payment_id, required),
+ (2, payment_hash, required),
+ });
+ Ok(Some(Event::PaymentFailed {
+ payment_id,
+ payment_hash,
+ }))
+ };
+ f()
+ },
+ 17u8 => {
+ // Value 17 is used for `Event::OpenChannelRequest`.
+ Ok(None)
+ },
+ 19u8 => {
+ let f = || {
+ let mut payment_hash = PaymentHash([0; 32]);
+ let mut purpose = UpgradableRequired(None);
+ let mut amount_msat = 0;
+ let mut receiver_node_id = None;
+ read_tlv_fields!(reader, {
+ (0, payment_hash, required),
+ (1, receiver_node_id, option),
+ (2, purpose, upgradable_required),
+ (4, amount_msat, required),
+ });
+ Ok(Some(Event::PaymentClaimed {
+ receiver_node_id,
+ payment_hash,
+ purpose: _init_tlv_based_struct_field!(purpose, upgradable_required),
+ amount_msat,
+ }))
+ };
+ f()
+ },
+ 21u8 => {
+ let f = || {
+ let mut payment_id = PaymentId([0; 32]);
+ let mut payment_hash = PaymentHash([0; 32]);
+ let mut path: Option<Vec<RouteHop>> = Some(vec![]);
+ read_tlv_fields!(reader, {
+ (0, payment_id, required),
+ (2, payment_hash, required),
+ (4, path, vec_type),
+ });
+ Ok(Some(Event::ProbeSuccessful {
+ payment_id,
+ payment_hash,
+ path: path.unwrap(),
+ }))
+ };
+ f()
+ },
+ 23u8 => {
+ let f = || {
+ let mut payment_id = PaymentId([0; 32]);
+ let mut payment_hash = PaymentHash([0; 32]);
+ let mut path: Option<Vec<RouteHop>> = Some(vec![]);
+ let mut short_channel_id = None;
+ read_tlv_fields!(reader, {
+ (0, payment_id, required),
+ (2, payment_hash, required),
+ (4, path, vec_type),
+ (6, short_channel_id, option),
+ });
+ Ok(Some(Event::ProbeFailed {
+ payment_id,
+ payment_hash,
+ path: path.unwrap(),
+ short_channel_id,
+ }))
+ };
+ f()
+ },
+ 25u8 => {
+ let f = || {
+ let mut prev_channel_id = [0; 32];
+ let mut failed_next_destination_opt = UpgradableRequired(None);
+ read_tlv_fields!(reader, {
+ (0, prev_channel_id, required),
+ (2, failed_next_destination_opt, upgradable_required),
+ });
+ Ok(Some(Event::HTLCHandlingFailed {
+ prev_channel_id,
+ failed_next_destination: _init_tlv_based_struct_field!(failed_next_destination_opt, upgradable_required),
+ }))
+ };
+ f()
+ },
+ // 27 is `Event::BumpTransaction`; its writer never persists any contents.
+ 27u8 => Ok(None),
+ 29u8 => {
+ let f = || {
+ let mut channel_id = [0; 32];
+ let mut user_channel_id: u128 = 0;
+ let mut counterparty_node_id = RequiredWrapper(None);
+ let mut channel_type = RequiredWrapper(None);
+ read_tlv_fields!(reader, {
+ (0, channel_id, required),
+ (2, user_channel_id, required),
+ (4, counterparty_node_id, required),
+ (6, channel_type, required),
+ });
+
+ Ok(Some(Event::ChannelReady {
+ channel_id,
+ user_channel_id,
+ counterparty_node_id: counterparty_node_id.0.unwrap(),
+ channel_type: channel_type.0.unwrap()
+ }))
+ };
+ f()
+ },
+ 31u8 => {
+ let f = || {
+ let mut channel_id = [0; 32];
+ let mut user_channel_id: u128 = 0;
+ let mut former_temporary_channel_id = None;
+ let mut counterparty_node_id = RequiredWrapper(None);
+ let mut funding_txo = RequiredWrapper(None);
+ read_tlv_fields!(reader, {
+ (0, channel_id, required),
+ (2, user_channel_id, required),
+ (4, former_temporary_channel_id, required),
+ (6, counterparty_node_id, required),
+ (8, funding_txo, required),
+ });
+
+ Ok(Some(Event::ChannelPending {
+ channel_id,
+ user_channel_id,
+ former_temporary_channel_id,
+ counterparty_node_id: counterparty_node_id.0.unwrap(),
+ funding_txo: funding_txo.0.unwrap()
+ }))
+ };
+ f()
+ },
+ // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
+ // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
+ // reads.
+ x if x % 2 == 1 => {
+ // If the event is of unknown type, assume it was written with `write_tlv_fields`,
+ // which prefixes the whole thing with a length BigSize. Because the event is
+ // odd-type unknown, we should treat it as `Ok(None)` even if it has some TLV
+ // fields that are even. Thus, we avoid using `read_tlv_fields` and simply read
+ // exactly the number of bytes specified, ignoring them entirely.
+ let tlv_len: BigSize = Readable::read(reader)?;
+ FixedLengthReader::new(reader, tlv_len.0)
+ .eat_remaining().map_err(|_| msgs::DecodeError::ShortRead)?;
+ Ok(None)
+ },
+ // Unknown even types are required-to-understand, so fail deserialization.
+ _ => Err(msgs::DecodeError::InvalidValue)
+ }
+ }
+}
+
+/// An event generated by ChannelManager which indicates a message should be sent to a peer (or
+/// broadcast to most peers).
+/// These events are handled by PeerManager::process_events if you are using a PeerManager.
+#[derive(Clone, Debug)]
+pub enum MessageSendEvent {
+ /// Used to indicate that we've accepted a channel open and should send the accept_channel
+ /// message provided to the given peer.
+ SendAcceptChannel {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::AcceptChannel,
+ },
+ /// Used to indicate that we've initiated a channel open and should send the open_channel
+ /// message provided to the given peer.
+ SendOpenChannel {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::OpenChannel,
+ },
+ /// Used to indicate that a funding_created message should be sent to the peer with the given node_id.
+ SendFundingCreated {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::FundingCreated,
+ },
+ /// Used to indicate that a funding_signed message should be sent to the peer with the given node_id.
+ SendFundingSigned {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::FundingSigned,
+ },
+ /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id.
+ SendChannelReady {
+ /// The node_id of the node which should receive these message(s)
+ node_id: PublicKey,
+ /// The channel_ready message which should be sent.
+ msg: msgs::ChannelReady,
+ },
+ /// Used to indicate that an announcement_signatures message should be sent to the peer with the given node_id.
+ SendAnnouncementSignatures {
+ /// The node_id of the node which should receive these message(s)
+ node_id: PublicKey,
+ /// The announcement_signatures message which should be sent.
+ msg: msgs::AnnouncementSignatures,
+ },
+ /// Used to indicate that a series of HTLC update messages, as well as a commitment_signed
+ /// message should be sent to the peer with the given node_id.
+ UpdateHTLCs {
+ /// The node_id of the node which should receive these message(s)
+ node_id: PublicKey,
+ /// The update messages which should be sent. ALL messages in the struct should be sent!
+ updates: msgs::CommitmentUpdate,
+ },
+ /// Used to indicate that a revoke_and_ack message should be sent to the peer with the given node_id.
+ SendRevokeAndACK {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::RevokeAndACK,
+ },
+ /// Used to indicate that a closing_signed message should be sent to the peer with the given node_id.
+ SendClosingSigned {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::ClosingSigned,
+ },
+ /// Used to indicate that a shutdown message should be sent to the peer with the given node_id.
+ SendShutdown {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::Shutdown,
+ },
+ /// Used to indicate that a channel_reestablish message should be sent to the peer with the given node_id.
+ SendChannelReestablish {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::ChannelReestablish,
+ },
+ /// Used to send a channel_announcement and channel_update to a specific peer, likely on
+ /// initial connection to ensure our peers know about our channels.
+ SendChannelAnnouncement {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The channel_announcement which should be sent.
+ msg: msgs::ChannelAnnouncement,
+ /// The followup channel_update which should be sent.
+ update_msg: msgs::ChannelUpdate,
+ },
+ /// Used to indicate that a channel_announcement and channel_update should be broadcast to all
+ /// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2).
+ ///
+ /// Note that after doing so, you very likely (unless you did so very recently) want to
+ /// broadcast a node_announcement (e.g. via [`PeerManager::broadcast_node_announcement`]). This
+ /// ensures that any nodes which see our channel_announcement also have a relevant
+ /// node_announcement, including relevant feature flags which may be important for routing
+ /// through or to us.
+ ///
+ /// [`PeerManager::broadcast_node_announcement`]: crate::ln::peer_handler::PeerManager::broadcast_node_announcement
+ BroadcastChannelAnnouncement {
+ /// The channel_announcement which should be sent.
+ msg: msgs::ChannelAnnouncement,
+ /// The followup channel_update which should be sent.
+ update_msg: Option<msgs::ChannelUpdate>,
+ },
+ /// Used to indicate that a channel_update should be broadcast to all peers.
+ BroadcastChannelUpdate {
+ /// The channel_update which should be sent.
+ msg: msgs::ChannelUpdate,
+ },
+ /// Used to indicate that a node_announcement should be broadcast to all peers.
+ BroadcastNodeAnnouncement {
+ /// The node_announcement which should be sent.
+ msg: msgs::NodeAnnouncement,
+ },
+ /// Used to indicate that a channel_update should be sent to a single peer.
+ /// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is a
+ /// private channel and we shouldn't be informing all of our peers of channel parameters.
+ SendChannelUpdate {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The channel_update which should be sent.
+ msg: msgs::ChannelUpdate,
+ },
+ /// Used to indicate that the contained error-handling `action` should be taken for the given peer.
+ HandleError {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The action which should be taken.
+ action: msgs::ErrorAction
+ },
+ /// Query a peer for channels with funding transaction UTXOs in a block range.
+ SendChannelRangeQuery {
+ /// The node_id of this message recipient
+ node_id: PublicKey,
+ /// The query_channel_range which should be sent.
+ msg: msgs::QueryChannelRange,
+ },
+ /// Request routing gossip messages from a peer for a list of channels identified by
+ /// their short_channel_ids.
+ SendShortIdsQuery {
+ /// The node_id of this message recipient
+ node_id: PublicKey,
+ /// The query_short_channel_ids which should be sent.
+ msg: msgs::QueryShortChannelIds,
+ },
+ /// Sends a reply to a channel range query. This may be one of several SendReplyChannelRange events
+ /// emitted during processing of the query.
+ SendReplyChannelRange {
+ /// The node_id of this message recipient
+ node_id: PublicKey,
+ /// The reply_channel_range which should be sent.
+ msg: msgs::ReplyChannelRange,
+ },
+ /// Sends a timestamp filter for inbound gossip. This should be sent on each new connection to
+ /// enable receiving gossip messages from the peer.
+ SendGossipTimestampFilter {
+ /// The node_id of this message recipient
+ node_id: PublicKey,
+ /// The gossip_timestamp_filter which should be sent.
+ msg: msgs::GossipTimestampFilter,
+ },
+}
+
+/// A trait indicating an object may generate [`MessageSendEvent`]s
+pub trait MessageSendEventsProvider {
+ /// Gets the list of pending events which were generated by previous actions, clearing the list
+ /// in the process.
+ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent>;
+}
+
+/// A trait indicating an object may generate onion messages to send
+pub trait OnionMessageProvider {
+ /// Gets the next pending onion message for the peer with the given node id.
+ ///
+ /// Returns `None` if there are no onion messages pending for the given peer.
+ fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage>;
+}
+
+/// A trait indicating an object may generate events.
+///
+/// Events are processed by passing an [`EventHandler`] to [`process_pending_events`].
+///
+/// Implementations of this trait may also feature an async version of event handling, as shown with
+/// [`ChannelManager::process_pending_events_async`] and
+/// [`ChainMonitor::process_pending_events_async`].
+///
+/// # Requirements
+///
+/// When using this trait, [`process_pending_events`] will call [`handle_event`] for each pending
+/// event since the last invocation.
+///
+/// In order to ensure no [`Event`]s are lost, implementors of this trait will persist [`Event`]s
+/// and replay any unhandled events on startup. An [`Event`] is considered handled when
+/// [`process_pending_events`] returns, thus handlers MUST fully handle [`Event`]s and persist any
+/// relevant changes to disk *before* returning.
+///
+/// Further, because an application may crash between an [`Event`] being handled and the
+/// implementor of this trait being re-serialized, [`Event`] handling must be idempotent - in
+/// effect, [`Event`]s may be replayed.
+///
+/// Note, handlers may call back into the provider and thus deadlocking must be avoided. Be sure to
+/// consult the provider's documentation on the implication of processing events and how a handler
+/// may safely use the provider (e.g., see [`ChannelManager::process_pending_events`] and
+/// [`ChainMonitor::process_pending_events`]).
+///
+/// This is not exported to bindings users as there is likely no reason for a user to implement
+/// this trait on their own type(s).
+///
+/// [`process_pending_events`]: Self::process_pending_events
+/// [`handle_event`]: EventHandler::handle_event
+/// [`ChannelManager::process_pending_events`]: crate::ln::channelmanager::ChannelManager#method.process_pending_events
+/// [`ChainMonitor::process_pending_events`]: crate::chain::chainmonitor::ChainMonitor#method.process_pending_events
+/// [`ChannelManager::process_pending_events_async`]: crate::ln::channelmanager::ChannelManager::process_pending_events_async
+/// [`ChainMonitor::process_pending_events_async`]: crate::chain::chainmonitor::ChainMonitor::process_pending_events_async
+pub trait EventsProvider {
+ /// Processes any events generated since the last call using the given event handler.
+ ///
+ /// See the trait-level documentation for requirements.
+ fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler;
+}
+
+/// A trait implemented for objects handling events from [`EventsProvider`].
+///
+/// An async variation also exists for implementations of [`EventsProvider`] that support async
+/// event handling. The async event handler should satisfy the generic bounds: `F:
+/// core::future::Future, H: Fn(Event) -> F`.
+pub trait EventHandler {
+ /// Handles the given [`Event`].
+ ///
+ /// See [`EventsProvider`] for details that must be considered when implementing this method.
+ /// In particular, [`Event`]s may be replayed, so handling must be idempotent.
+ fn handle_event(&self, event: Event);
+}
+
+// Blanket impl: any `Fn(Event)` closure or function can be used directly as an `EventHandler`.
+impl<F> EventHandler for F where F: Fn(Event) {
+ fn handle_event(&self, event: Event) {
+ self(event)
+ }
+}
+
+// Forwards to the inner handler, allowing `Arc`-wrapped handlers to be used directly.
+impl<T: EventHandler> EventHandler for Arc<T> {
+ fn handle_event(&self, event: Event) {
+ self.deref().handle_event(event)
+ }
+}
pub mod offers;
pub mod routing;
pub mod onion_message;
+pub mod events;
#[cfg(feature = "std")]
/// Re-export of either `core2::io` or `std::io`, depending on the `std` feature flag.
// licenses.
//! Various utilities for building scripts and deriving keys related to channels. These are
-//! largely of interest for those implementing chain::keysinterface::Sign message signing by hand.
+//! largely of interest for those implementing the traits on [`chain::keysinterface`] by hand.
use bitcoin::blockdata::script::{Script,Builder};
use bitcoin::blockdata::opcodes;
///
/// Normally, this is converted to the broadcaster/countersignatory-organized DirectedChannelTransactionParameters
/// before use, via the as_holder_broadcastable and as_counterparty_broadcastable functions.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChannelTransactionParameters {
/// Holder public keys
pub holder_pubkeys: ChannelPublicKeys,
}
/// Late-bound per-channel counterparty data used to build transactions.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CounterpartyChannelTransactionParameters {
/// Counter-party public keys
pub pubkeys: ChannelPublicKeys,
///
/// Only include HTLCs that are above the dust limit for the channel.
///
- /// (C-not exported) due to the generic though we likely should expose a version without
+ /// This is not exported to bindings users due to the generic though we likely should expose a version without
pub fn new_with_auxiliary_htlc_data<T>(commitment_number: u64, to_broadcaster_value_sat: u64, to_countersignatory_value_sat: u64, opt_anchors: bool, broadcaster_funding_key: PublicKey, countersignatory_funding_key: PublicKey, keys: TxCreationKeys, feerate_per_kw: u32, htlcs_with_aux: &mut Vec<(HTLCOutputInCommitment, T)>, channel_parameters: &DirectedChannelTransactionParameters) -> CommitmentTransaction {
// Sort outputs and populate output indices while keeping track of the auxiliary data
let (outputs, htlcs) = Self::internal_build_outputs(&keys, to_broadcaster_value_sat, to_countersignatory_value_sat, htlcs_with_aux, channel_parameters, opt_anchors, &broadcaster_funding_key, &countersignatory_funding_key).unwrap();
/// Use non-zero fee anchors
///
- /// (C-not exported) due to move, and also not likely to be useful for binding users
+ /// This is not exported to bindings users due to move, and also not likely to be useful for bindings users
pub fn with_non_zero_fee_anchors(mut self) -> Self {
self.opt_non_zero_fee_anchors = Some(());
self
/// which were included in this commitment transaction in output order.
/// The transaction index is always populated.
///
- /// (C-not exported) as we cannot currently convert Vec references to/from C, though we should
+ /// This is not exported to bindings users as we cannot currently convert Vec references to/from C, though we should
/// expose a less effecient version which creates a Vec of references in the future.
pub fn htlcs(&self) -> &Vec<HTLCOutputInCommitment> {
&self.htlcs
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::enforcing_trait_impls::EnforcingSigner;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::util::errors::APIError;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::TestBroadcaster;
// because the update is bogus, ultimately the error that's returned
// should be a PermanentFailure.
if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
- logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+ logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
}
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 0);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // But we want to re-emit ChannelPending
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
if lock_commitment {
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
-use crate::util::events::ClosureReason;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
user_id: u128,
channel_id: [u8; 32],
+ temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
channel_state: u32,
// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
// blinded paths instead of simple scid+node_id aliases.
outbound_scid_alias: u64,
+ // We track whether we already emitted a `ChannelPending` event.
+ channel_pending_event_emitted: bool,
+
// We track whether we already emitted a `ChannelReady` event.
channel_ready_event_emitted: bool,
}
}
+ let temporary_channel_id = entropy_source.get_secure_random_bytes();
+
Ok(Channel {
user_id,
inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
- channel_id: entropy_source.get_secure_random_bytes(),
+ channel_id: temporary_channel_id,
+ temporary_channel_id: Some(temporary_channel_id),
channel_state: ChannelState::OurInitSent as u32,
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
latest_inbound_scid_alias: None,
outbound_scid_alias,
+ channel_pending_event_emitted: false,
channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
inbound_handshake_limits_override: None,
channel_id: msg.temporary_channel_id,
+ temporary_channel_id: Some(msg.temporary_channel_id),
channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
latest_inbound_scid_alias: None,
outbound_scid_alias,
+ channel_pending_event_emitted: false,
channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
Ok((msgs::FundingSigned {
channel_id: self.channel_id,
- signature
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
}, channel_monitor))
}
channel_id: self.channel_id,
per_commitment_secret,
next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
}
}
self.channel_id
}
+ // Returns the `temporary_channel_id` used during channel establishment.
+ //
+ // Will return `None` for channels created prior to LDK version 0.0.115.
+ pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+ self.temporary_channel_id
+ }
+
pub fn minimum_depth(&self) -> Option<u32> {
self.minimum_depth
}
self.prev_config.map(|prev_config| prev_config.0)
}
+ // Checks whether we should emit a `ChannelPending` event.
+ pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
+ self.is_funding_initiated() && !self.channel_pending_event_emitted
+ }
+
+ // Returns whether we already emitted a `ChannelPending` event.
+ pub(crate) fn channel_pending_event_emitted(&self) -> bool {
+ self.channel_pending_event_emitted
+ }
+
+ // Remembers that we already emitted a `ChannelPending` event.
+ pub(crate) fn set_channel_pending_event_emitted(&mut self) {
+ self.channel_pending_event_emitted = true;
+ }
+
// Checks whether we should emit a `ChannelReady` event.
pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
self.is_usable() && !self.channel_ready_event_emitted
})
}
- pub fn get_feerate(&self) -> u32 {
+ /// Returns the channel's current feerate, in satoshis per 1000 weight units
+ /// (i.e., `feerate_per_kw`).
+ pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
self.feerate_per_kw
}
None => Builder::new().into_script(),
}),
channel_type: Some(self.channel_type.clone()),
+ #[cfg(taproot)]
+ next_local_nonce: None,
}
}
temporary_channel_id,
funding_txid: funding_txo.txid,
funding_output_index: funding_txo.index,
- signature
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
})
}
channel_id: self.channel_id,
signature,
htlc_signatures,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
}
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
- self.latest_monitor_update_id += 1;
+ self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
Some((funding_txo, ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config)
{ Some(self.holder_max_htlc_value_in_flight_msat) } else { None };
+ let channel_pending_event_emitted = Some(self.channel_pending_event_emitted);
let channel_ready_event_emitted = Some(self.channel_ready_event_emitted);
// `user_id` used to be a single u64 value. In order to remain backwards compatible with
(23, channel_ready_event_emitted, option),
(25, user_id_high_opt, option),
(27, self.channel_keys_id, required),
+ (29, self.temporary_channel_id, option),
+ (31, channel_pending_event_emitted, option),
});
Ok(())
let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
let mut latest_inbound_scid_alias = None;
let mut outbound_scid_alias = None;
+ let mut channel_pending_event_emitted = None;
let mut channel_ready_event_emitted = None;
let mut user_id_high_opt: Option<u64> = None;
let mut channel_keys_id: Option<[u8; 32]> = None;
+ let mut temporary_channel_id: Option<[u8; 32]> = None;
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(23, channel_ready_event_emitted, option),
(25, user_id_high_opt, option),
(27, channel_keys_id, option),
+ (29, temporary_channel_id, option),
+ (31, channel_pending_event_emitted, option),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
inbound_handshake_limits_override: None,
channel_id,
+ temporary_channel_id,
channel_state,
announcement_sigs_state: announcement_sigs_state.unwrap(),
secp_ctx,
// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
+ channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
#[cfg(any(test, fuzzing))]
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
payment_secret: None,
- payment_params: None,
}
});
let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut config = UserConfig::default();
config.channel_handshake_config.announced_channel = false;
- let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 100000, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
+ let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
chan.holder_dust_limit_satoshis = 546;
chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
} }
}
+ // anchors: simple commitment tx with no HTLCs and single anchor
+ test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
+ "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
+ "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
+
// simple commitment tx with no HTLCs
chan.value_to_self_msat = 7000000000;
chan.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
htlc_id: 6,
- amount_msat: 5000000,
+ amount_msat: 5000001,
cltv_expiry: 506,
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
out
});
- test_commitment!("30440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df9",
- "3045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f",
- "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a79f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f014730440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
+ test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
+ "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
+ "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
{ 0,
- "304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c6",
- "304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c",
- "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc34000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050048304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c60148304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
+ "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
+ "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
+ "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
{ 1,
- "304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5",
- "30450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d39",
- "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3401000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5014830450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d3901008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
+ "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
+ "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
+ "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
{ 2,
- "30440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f",
- "30440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e511",
- "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3402000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f014730440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e51101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
+ "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
+ "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
+ "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
} );
- test_commitment_with_anchors!("3045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a68948",
- "3045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e45",
- "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aae9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e4501483045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a6894801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
+ test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
+ "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
+ "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
{ 0,
- "3045022100de8a0649d54fd2e4fc04502c77df9b65da839bbd01854f818f129338b99564b2022009528dbb12c00e874cb2149b1dccc600c69ea5e4042ebf584984fcb029c2d1ec",
- "304402203e7c2622fa3ca29355d37a0ea991bfd7cdb54e6122a1d98d3229d092131f55cd022055263f7f8f32f4cd2f86da63ca106bd7badf0b19ee9833d80cd3b9216eeafd74",
- "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe2902000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100de8a0649d54fd2e4fc04502c77df9b65da839bbd01854f818f129338b99564b2022009528dbb12c00e874cb2149b1dccc600c69ea5e4042ebf584984fcb029c2d1ec8347304402203e7c2622fa3ca29355d37a0ea991bfd7cdb54e6122a1d98d3229d092131f55cd022055263f7f8f32f4cd2f86da63ca106bd7badf0b19ee9833d80cd3b9216eeafd74012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
+ "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
+ "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
+ "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
{ 1,
- "3045022100de6eee8474376ea316d007b33103b4543a46bdf6fda5cbd5902b28a5bc14584f022002989e7b4f7813b77acbe4babcf96d7ffbbe0bf14cba24672364f8e591479edb",
- "3045022100c10688346a9d84647bde7027da07f0d79c6d4129307e4c6c9aea7bdbf25ac3350220269104209793c32c47491698c4e46ebea9c3293a1e4403f9abda39f79698f6b5",
- "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe290300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100de6eee8474376ea316d007b33103b4543a46bdf6fda5cbd5902b28a5bc14584f022002989e7b4f7813b77acbe4babcf96d7ffbbe0bf14cba24672364f8e591479edb83483045022100c10688346a9d84647bde7027da07f0d79c6d4129307e4c6c9aea7bdbf25ac3350220269104209793c32c47491698c4e46ebea9c3293a1e4403f9abda39f79698f6b501008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
+ "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
+ "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
+ "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
{ 2,
- "3045022100fe87da8124ceecbcabb9d599c5339f40277c7c7406514fafbccbf180c7c09cf40220429c7fb6d0fd3705e931ab1219ab0432af38ae4d676008cc1964fbeb8cd35d2e",
- "3044022040ac769a851da31d8e4863e5f94719204f716c82a1ce6d6c52193d9a33b84bce022035df97b078ce80f20dca2109e4c6075af0b50148811452e7290e68b2680fced4",
- "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe290400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fe87da8124ceecbcabb9d599c5339f40277c7c7406514fafbccbf180c7c09cf40220429c7fb6d0fd3705e931ab1219ab0432af38ae4d676008cc1964fbeb8cd35d2e83473044022040ac769a851da31d8e4863e5f94719204f716c82a1ce6d6c52193d9a33b84bce022035df97b078ce80f20dca2109e4c6075af0b50148811452e7290e68b2680fced401008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
+ "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
+ "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
+ "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
} );
}
//! The top-level channel management and payment tracking stuff lives here.
//!
-//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
+//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
//! upon reconnect to the relevant peer(s).
//!
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::events;
+use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::wire::Encode;
use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
-use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::util::events;
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
+use crate::util::string::UntrustedString;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;
pub(super) routing: PendingHTLCRouting,
pub(super) incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
+ /// Amount received
pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
+ /// Sender intended amount to forward or receive (actual amount received
+ /// may overshoot this in either case)
pub(super) outgoing_amt_msat: u64,
pub(super) outgoing_cltv_value: u32,
}
cltv_expiry: u32,
/// The amount (in msats) of this MPP part
value: u64,
+ /// The amount (in msats) that the sender intended to be sent in this MPP
+ /// part (used for validating total MPP amount)
+ sender_intended_value: u64,
onion_payload: OnionPayload,
timer_ticks: u8,
- /// The sum total of all MPP parts
+ /// The total value received for a payment (sum of all MPP parts if the payment is a MPP).
+ /// Gets set to the amount reported when pushing [`Event::PaymentClaimable`].
+ total_value_received: Option<u64>,
+ /// The sender intended sum total of all MPP parts specified in the onion
total_msat: u64,
}
/// A payment identifier used to uniquely identify a payment to LDK.
-/// (C-not exported) as we just use [u8; 32] directly
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentId(pub [u8; 32]);
}
/// An identifier used to uniquely identify an intercepted HTLC to LDK.
-/// (C-not exported) as we just use [u8; 32] directly
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct InterceptId(pub [u8; 32]);
first_hop_htlc_msat: u64,
payment_id: PaymentId,
payment_secret: Option<PaymentSecret>,
- /// Note that this is now "deprecated" - we write it for forwards (and read it for
- /// backwards) compatibility reasons, but prefer to use the data in the
- /// [`super::outbound_payment`] module, which stores per-payment data once instead of in
- /// each HTLC.
- payment_params: Option<PaymentParameters>,
},
}
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
0u8.hash(hasher);
prev_hop_data.hash(hasher);
},
- HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payment_params } => {
+ HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat } => {
1u8.hash(hasher);
path.hash(hasher);
session_priv[..].hash(hasher);
payment_id.hash(hasher);
payment_secret.hash(hasher);
first_hop_htlc_msat.hash(hasher);
- payment_params.hash(hasher);
},
}
}
first_hop_htlc_msat: 0,
payment_id: PaymentId([2; 32]),
payment_secret: None,
- payment_params: None,
}
}
min_value_msat: Option<u64>,
}
-/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
-/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
+/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
+/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
-/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
-/// issues such as overly long function definitions. Note that the ChannelManager can take any type
-/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
-/// type alias chooses the concrete types of KeysManager and DefaultRouter.
+/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
+/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
+/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
+/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
+/// of [`KeysManager`] and [`DefaultRouter`].
///
-/// (C-not exported) as Arcs don't make sense in bindings
+/// This is not exported to bindings users as Arcs don't make sense in bindings
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
Arc<M>,
Arc<T>,
Arc<L>
>;
-/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
-/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
+/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference
+/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't
/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
-/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
+/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// prevents issues such as overly long function definitions. Note that the ChannelManager can take any type
-/// that implements KeysInterface or Router for its keys manager and router, respectively, but this
-/// type alias chooses the concrete types of KeysManager and DefaultRouter.
+/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
+/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
+/// of [`KeysManager`] and [`DefaultRouter`].
///
-/// (C-not exported) as Arcs don't make sense in bindings
+/// This is not exported to bindings users as Arcs don't make sense in bindings
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
-/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
+/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
/// to individual Channels.
///
-/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
+/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
/// all peers during write/read (though does not modify this instance, only the instance being
-/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
-/// called funding_transaction_generated for outbound channels).
+/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e.,
+/// called [`funding_transaction_generated`] for outbound channels) being closed.
///
-/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
-/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
-/// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates
-/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
+/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
+/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before
+/// returning from [`chain::Watch::watch_channel`]/[`update_channel`], with ChannelManagers, writing updates
+/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
/// the serialization process). If the deserialized version is out-of-date compared to the
-/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
-/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
+/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the
+/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees).
///
-/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
-/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
-/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
-/// block_connected() to step towards your best block) upon deserialization before using the
-/// object!
+/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
+/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
+/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
///
-/// Note that ChannelManager is responsible for tracking liveness of its channels and generating
-/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
+/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
+/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
+/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
///
-/// To avoid trivial DoS issues, ChannelManager limits the number of inbound connections and
+/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
/// inbound channels without confirmed funding transactions. This may result in nodes which we do
/// not have a channel with being unable to connect to us or open new channels with us if we have
/// many peers with unfunded channels.
///
/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
/// never limited. Please ensure you limit the count of such channels yourself.
///
-/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
-/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
-/// essentially you should default to using a SimpleRefChannelManager, and use a
-/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
+/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
+/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
+/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
+/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
+///
+/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
+/// [`funding_created`]: msgs::FundingCreated
+/// [`funding_transaction_generated`]: Self::funding_transaction_generated
+/// [`BlockHash`]: bitcoin::hash_types::BlockHash
+/// [`update_channel`]: chain::Watch::update_channel
+/// [`ChannelUpdate`]: msgs::ChannelUpdate
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`read`]: ReadableArgs::read
//
// Lock order:
// The tree structure below illustrates the lock order requirements for the different locks of the
pub outbound_htlc_maximum_msat: Option<u64>,
}
-/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
+/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
/// inbound. This may be zero for inbound channels serialized with LDK versions prior to
/// 0.0.113.
pub user_channel_id: u128,
+ /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
+ /// which is applied to commitment and HTLC transactions.
+ ///
+ /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
+ pub feerate_sat_per_1000_weight: Option<u32>,
/// Our total balance. This is the amount we would get if we close the channel.
/// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
/// amount is not likely to be recoverable on close.
outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
inbound_scid_alias: channel.latest_inbound_scid_alias(),
channel_value_satoshis: channel.get_value_satoshis(),
+ feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()),
unspendable_punishment_reserve: to_self_reserve_satoshis,
balance_msat: balance.balance_msat,
inbound_capacity_msat: balance.inbound_capacity_msat,
}}
}
+macro_rules! emit_channel_pending_event {
+ ($locked_events: expr, $channel: expr) => {
+ if $channel.should_emit_channel_pending_event() {
+ $locked_events.push(events::Event::ChannelPending {
+ channel_id: $channel.channel_id(),
+ former_temporary_channel_id: $channel.temporary_channel_id(),
+ counterparty_node_id: $channel.get_counterparty_node_id(),
+ user_channel_id: $channel.get_user_id(),
+ funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ });
+ $channel.set_channel_pending_event_emitted();
+ }
+ }
+}
+
macro_rules! emit_channel_ready_event {
- ($self: expr, $channel: expr) => {
+ ($locked_events: expr, $channel: expr) => {
if $channel.should_emit_channel_ready_event() {
- {
- let mut pending_events = $self.pending_events.lock().unwrap();
- pending_events.push(events::Event::ChannelReady {
- channel_id: $channel.channel_id(),
- user_channel_id: $channel.get_user_id(),
- counterparty_node_id: $channel.get_counterparty_node_id(),
- channel_type: $channel.get_channel_type().clone(),
- });
- }
+ debug_assert!($channel.channel_pending_event_emitted());
+ $locked_events.push(events::Event::ChannelReady {
+ channel_id: $channel.channel_id(),
+ user_channel_id: $channel.get_user_id(),
+ counterparty_node_id: $channel.get_counterparty_node_id(),
+ channel_type: $channel.get_channel_type().clone(),
+ });
$channel.set_channel_ready_event_emitted();
}
}
R::Target: Router,
L::Target: Logger,
{
- /// Constructs a new ChannelManager to hold several channels and route between them.
+ /// Constructs a new `ChannelManager` to hold several channels and route between them.
///
/// This is the main "logic hub" for all channel-related actions, and implements
- /// ChannelMessageHandler.
+ /// [`ChannelMessageHandler`].
///
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
- /// Users need to notify the new ChannelManager when a new block is connected or
- /// disconnected using its `block_connected` and `block_disconnected` methods, starting
- /// from after `params.latest_hash`.
+ /// Users need to notify the new `ChannelManager` when a new block is connected or
+ /// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting
+ /// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
+ /// more details.
+ ///
+ /// [`block_connected`]: chain::Listen::block_connected
+ /// [`block_disconnected`]: chain::Listen::block_disconnected
+ /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
res
}
- /// Gets the list of open channels, in random order. See ChannelDetail field documentation for
+ /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
/// more information.
pub fn list_channels(&self) -> Vec<ChannelDetails> {
self.list_channels_with_filter(|_| true)
/// would appear on a force-closure transaction, whichever is lower. We will allow our
/// counterparty to pay as much fee as they'd like, however.
///
- /// May generate a SendShutdown message event on success, which should be relayed.
+ /// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, None)
}
/// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
/// will appear on a force-closure transaction, whichever is lower).
///
- /// May generate a SendShutdown message event on success, which should be relayed.
+ /// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
}
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
if let Some(peer_msg) = peer_msg {
- self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
+ self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) });
} else {
self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
}
payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>) -> Result<PendingHTLCInfo, ReceiveError>
{
// final_incorrect_cltv_expiry
- if hop_data.outgoing_cltv_value != cltv_expiry {
+ if hop_data.outgoing_cltv_value > cltv_expiry {
return Err(ReceiveError {
- msg: "Upstream node set CLTV to the wrong value",
+ msg: "Upstream node set CLTV to less than the CLTV set by the sender",
err_code: 18,
err_data: cltv_expiry.to_be_bytes().to_vec()
})
payment_hash,
incoming_shared_secret: shared_secret,
incoming_amt_msat: Some(amt_msat),
- outgoing_amt_msat: amt_msat,
+ outgoing_amt_msat: hop_data.amt_to_forward,
outgoing_cltv_value: hop_data.outgoing_cltv_value,
})
}
pending_forward_info
}
- /// Gets the current channel_update for the given channel. This first checks if the channel is
+ /// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
/// public, and thus should be called whenever the result is going to be passed out in a
/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
///
- /// Note that in `internal_closing_signed`, this function is called without the `peer_state`
+ /// Note that in [`internal_closing_signed`], this function is called without the `peer_state`
/// corresponding to the channel's counterparty locked, as the channel has been removed from the
/// storage and the `peer_state` lock has been dropped.
+ ///
+ /// [`channel_update`]: msgs::ChannelUpdate
+ /// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
if !chan.should_announce() {
return Err(LightningError {
self.get_channel_update_for_unicast(chan)
}
- /// Gets the current channel_update for the given channel. This does not check if the channel
- /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+ /// Gets the current [`channel_update`] for the given channel. This does not check if the channel
+ /// is public (only returning an `Err` if the channel does not yet have an assigned SCID),
/// and thus MUST NOT be called unless the recipient of the resulting message has already
/// provided evidence that they know about the existence of the channel.
///
- /// Note that through `internal_closing_signed`, this function is called without the
+ /// Note that through [`internal_closing_signed`], this function is called without the
/// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
/// removed from the storage and the `peer_state` lock has been dropped.
+ ///
+ /// [`channel_update`]: msgs::ChannelUpdate
+ /// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
}
#[cfg(test)]
- pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
let _lck = self.total_consistency_lock.read().unwrap();
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
}
- fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+ fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
// The top-level caller should hold the total_consistency_lock read lock.
debug_assert!(self.total_consistency_lock.try_write().is_err());
first_hop_htlc_msat: htlc_msat,
payment_id,
payment_secret: payment_secret.clone(),
- payment_params: payment_params.clone(),
}, onion_packet, &self.logger);
match break_chan_entry!(self, send_res, chan) {
Some(monitor_update) => {
/// Value parameters are provided via the last hop in route, see documentation for [`RouteHop`]
/// fields for more info.
///
- /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via
+ /// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via
/// [`PeerManager::process_events`]).
///
/// # Avoiding Duplicate Payments
///
/// # Possible Error States on [`PaymentSendFailure`]
///
- /// Each path may have a different return value, and PaymentSendValue may return a Vec with
+ /// Each path may have a different return value, and [`PaymentSendFailure`] may return a `Vec` with
/// each entry matching the corresponding-index entry in the route paths, see
/// [`PaymentSendFailure`] for more info.
///
/// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
/// relevant updates.
///
- /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
+ /// Note that depending on the type of the [`PaymentSendFailure`] the HTLC may have been
/// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
/// different route unless you intend to pay twice!
///
///
/// [`Event::PaymentSent`]: events::Event::PaymentSent
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
+ /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments
.send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
&self.pending_events,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
- fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
+ pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
retry_strategy, route_params, &self.router, self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.logger, &self.pending_events,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Send a payment that is probing the given route for liquidity. We calculate the
let best_block_height = self.best_block.read().unwrap().height();
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
/// implemented by Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
/// for more details.
///
- /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
- /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
+ /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
+ /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
- routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, ..
+ routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, ..
}
}) => {
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
}
};
- let claimable_htlc = ClaimableHTLC {
+ let mut claimable_htlc = ClaimableHTLC {
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
outpoint: prev_funding_outpoint,
incoming_packet_shared_secret: incoming_shared_secret,
phantom_shared_secret,
},
- value: outgoing_amt_msat,
+ // We differentiate the received value from the sender intended value
+ // if possible so that we don't prematurely mark MPP payments complete
+ // if routing nodes overpay
+ value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
+ sender_intended_value: outgoing_amt_msat,
timer_ticks: 0,
+ total_value_received: None,
total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
cltv_expiry,
onion_payload,
fail_htlc!(claimable_htlc, payment_hash);
continue
}
- let (_, htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
+ let (_, ref mut htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
.or_insert_with(|| (purpose(), Vec::new()));
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
continue
}
}
- let mut total_value = claimable_htlc.value;
+ let mut total_value = claimable_htlc.sender_intended_value;
for htlc in htlcs.iter() {
- total_value += htlc.value;
+ total_value += htlc.sender_intended_value;
match &htlc.onion_payload {
OnionPayload::Invoice { .. } => {
if htlc.total_msat != $payment_data.total_msat {
_ => unreachable!(),
}
}
- if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
- log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
- log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
+ // The condition determining whether an MPP is complete must
+ // match exactly the condition used in `timer_tick_occurred`
+ if total_value >= msgs::MAX_VALUE_MSAT {
fail_htlc!(claimable_htlc, payment_hash);
- } else if total_value == $payment_data.total_msat {
+ } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
+ log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc, payment_hash);
+ } else if total_value >= $payment_data.total_msat {
let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
+ let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
+ htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
new_events.push(events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
purpose: purpose(),
- amount_msat: total_value,
+ amount_msat,
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
});
}
match claimable_payments.claimable_htlcs.entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
+ let amount_msat = claimable_htlc.value;
+ claimable_htlc.total_value_received = Some(amount_msat);
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
let prev_channel_id = prev_funding_outpoint.to_channel_id();
new_events.push(events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
- amount_msat: outgoing_amt_msat,
+ amount_msat,
purpose,
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, &self.logger,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+ |path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
if !chan.is_outbound() { return NotifyOption::SkipPersist; }
// If the feerate has decreased by less than half, don't bother
- if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+ if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersist;
}
if !chan.is_live() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersist;
}
log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
- log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
chan.queue_update_fee(new_feerate, &self.logger);
NotifyOption::DoPersist
///
/// This currently includes:
/// * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
- /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+ /// * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more
/// than a minute, informing the network that they should no longer attempt to route over
/// the channel.
- /// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
- /// with the current `ChannelConfig`.
+ /// * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
+ /// with the current [`ChannelConfig`].
 /// * Removing peers which have disconnected and no longer have any channels.
///
- /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+ /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
/// estimate fetches.
+ ///
+ /// [`ChannelUpdate`]: msgs::ChannelUpdate
+ /// [`ChannelConfig`]: crate::util::config::ChannelConfig
pub fn timer_tick_occurred(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = NotifyOption::SkipPersist;
if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
// In this case we're not going to handle any timeouts of the parts here.
- if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+ // This condition determining whether the MPP is complete here must match
+ // exactly the condition used in `process_pending_htlc_forwards`.
+ if htlcs[0].total_msat <= htlcs.iter().fold(0, |total, htlc| total + htlc.sender_intended_value) {
return true;
} else if htlcs.into_iter().any(|htlc| {
htlc.timer_ticks += 1;
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, ref payment_params, .. } => {
+ HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
- session_priv, payment_id, payment_params, self.probing_cookie_secret, &self.secp_ctx,
+ session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
&self.pending_events, &self.logger)
{ self.push_pending_forwards_ev(); }
},
/// event matches your expectation. If you fail to do so and call this method, you may provide
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
- /// [`Event::PaymentClaimable`]: crate::util::events::Event::PaymentClaimable
- /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
+ /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
+ /// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
/// [`process_pending_events`]: EventsProvider::process_pending_events
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
// provide the preimage, so worrying too much about the optimal handling isn't worth
// it.
let mut claimable_amt_msat = 0;
+ let mut prev_total_msat = None;
let mut expected_amt_msat = None;
let mut valid_mpp = true;
let mut errs = Vec::new();
break;
}
- if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
- log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
+ if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
+ log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
debug_assert!(false);
valid_mpp = false;
break;
}
+ prev_total_msat = Some(htlc.total_msat);
+
+ if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
+ log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
+ debug_assert!(false);
+ valid_mpp = false;
+ break;
+ }
+ expected_amt_msat = htlc.total_value_received;
- expected_amt_msat = Some(htlc.total_msat);
if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
// We don't currently support MPP for spontaneous payments, so just check
// that there's one payment here and move on.
claim_from_onchain_tx: from_onchain,
prev_channel_id,
next_channel_id,
+ outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
}})
} else { None }
});
});
}
- emit_channel_ready_event!(self, channel);
-
macro_rules! handle_cs { () => {
if let Some(update) = commitment_update {
pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
self.tx_broadcaster.broadcast_transaction(&tx);
}
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_pending_event!(pending_events, channel);
+ emit_channel_ready_event!(pending_events, channel);
+ }
+
htlc_forwards
}
}
}
- emit_channel_ready_event!(self, chan.get_mut());
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_ready_event!(pending_events, chan.get_mut());
+ }
Ok(())
},
Ok(())
}
- /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+ /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
fn process_pending_monitor_events(&self) -> bool {
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
}
}
- emit_channel_ready_event!(self, channel);
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_ready_event!(pending_events, channel);
+ }
if let Some(announcement_sigs) = announcement_sigs {
log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
}
}
- /// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
- /// indicating whether persistence is necessary. Only one listener on
- /// [`await_persistable_update`], [`await_persistable_update_timeout`], or a future returned by
- /// [`get_persistable_update_future`] is guaranteed to be woken up.
- ///
- /// Note that this method is not available with the `no-std` feature.
+ /// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
///
- /// [`await_persistable_update`]: Self::await_persistable_update
- /// [`await_persistable_update_timeout`]: Self::await_persistable_update_timeout
- /// [`get_persistable_update_future`]: Self::get_persistable_update_future
- #[cfg(any(test, feature = "std"))]
- pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
- self.persistence_notifier.wait_timeout(max_wait)
- }
-
- /// Blocks until ChannelManager needs to be persisted. Only one listener on
- /// [`await_persistable_update`], `await_persistable_update_timeout`, or a future returned by
- /// [`get_persistable_update_future`] is guaranteed to be woken up.
+ /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
+ /// [`ChannelManager`] and should instead register actions to be taken later.
///
- /// [`await_persistable_update`]: Self::await_persistable_update
- /// [`get_persistable_update_future`]: Self::get_persistable_update_future
- pub fn await_persistable_update(&self) {
- self.persistence_notifier.wait()
- }
-
- /// Gets a [`Future`] that completes when a persistable update is available. Note that
- /// callbacks registered on the [`Future`] MUST NOT call back into this [`ChannelManager`] and
- /// should instead register actions to be taken later.
pub fn get_persistable_update_future(&self) -> Future {
self.persistence_notifier.get_future()
}
/// [`ChannelManager`].
pub fn provided_init_features(_config: &UserConfig) -> InitFeatures {
// Note that if new features are added here which other peers may (eventually) require, we
- // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for
- // ErroringMessageHandler.
+ // should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
+ // [`ErroringMessageHandler`].
let mut features = InitFeatures::empty();
features.set_data_loss_protect_optional();
features.set_upfront_shutdown_script_optional();
(33, self.inbound_htlc_minimum_msat, option),
(35, self.inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
+ (39, self.feerate_sat_per_1000_weight, option),
});
Ok(())
}
(33, inbound_htlc_minimum_msat, option),
(35, inbound_htlc_maximum_msat, option),
(37, user_channel_id_high_opt, option),
+ (39, feerate_sat_per_1000_weight, option),
});
// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
is_public: is_public.0.unwrap(),
inbound_htlc_minimum_msat,
inbound_htlc_maximum_msat,
+ feerate_sat_per_1000_weight,
})
}
}
(0, self.prev_hop, required),
(1, self.total_msat, required),
(2, self.value, required),
+ (3, self.sender_intended_value, required),
(4, payment_data, option),
+ (5, self.total_value_received, option),
(6, self.cltv_expiry, required),
(8, keysend_preimage, option),
});
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
let mut prev_hop = crate::util::ser::RequiredWrapper(None);
let mut value = 0;
+ let mut sender_intended_value = None;
let mut payment_data: Option<msgs::FinalOnionHopData> = None;
let mut cltv_expiry = 0;
+ let mut total_value_received = None;
let mut total_msat = None;
let mut keysend_preimage: Option<PaymentPreimage> = None;
read_tlv_fields!(reader, {
(0, prev_hop, required),
(1, total_msat, option),
(2, value, required),
+ (3, sender_intended_value, option),
(4, payment_data, option),
+ (5, total_value_received, option),
(6, cltv_expiry, required),
(8, keysend_preimage, option)
});
prev_hop: prev_hop.0.unwrap(),
timer_ticks: 0,
value,
+ sender_intended_value: sender_intended_value.unwrap_or(value),
+ total_value_received,
total_msat: total_msat.unwrap(),
onion_payload,
cltv_expiry,
path,
payment_id: payment_id.unwrap(),
payment_secret,
- payment_params,
})
}
1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
match self {
- HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => {
+ HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret } => {
0u8.write(writer)?;
let payment_id_opt = Some(payment_id);
write_tlv_fields!(writer, {
(2, first_hop_htlc_msat, required),
(3, payment_secret, option),
(4, *path, vec_type),
- (5, payment_params, option),
+ (5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
});
}
HTLCSource::PreviousHopData(ref field) => {
/// In such cases the latest local transactions will be sent to the tx_broadcaster included in
/// this struct.
///
- /// (C-not exported) because we have no HashMap bindings
+ /// This is not exported to bindings users because we have no HashMap bindings
pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>,
}
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = Vec::new();
+ let mut pending_background_events = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
- let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+ let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+ if let Some(monitor_update) = monitor_update {
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+ }
failed_htlcs.append(&mut new_failed_htlcs);
- monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
channel_closures.push(events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
}
}
- for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+ for (funding_txo, _) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
- log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
- monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ };
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
}
}
}
let background_event_count: u64 = Readable::read(reader)?;
- let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
- 0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+ 0 => {
+ let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+ if pending_background_events.iter().find(|e| {
+ let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+ *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+ }).is_none() {
+ pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+ }
+ }
_ => return Err(DecodeError::InvalidValue),
}
}
per_peer_state: FairRwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
- pending_background_events: Mutex::new(pending_background_events_read),
+ pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+ #[cfg(feature = "std")]
use core::time::Duration;
use core::sync::atomic::Ordering;
+ use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
- use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::util::test_utils;
use crate::util::config::ChannelConfig;
use crate::chain::keysinterface::EntropySource;
// All nodes start with a persistable update pending as `create_network` connects each node
// with all other nodes to make most tests simpler.
- assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
&nodes[0].node.get_our_node_id()).pop().unwrap();
// The first two nodes (which opened a channel) should now require fresh persistence
- assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
// ... but the last node should not.
- assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
// After persisting the first two nodes they should no longer need fresh persistence.
- assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
// about the channel.
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
- assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
// The nodes which are a party to the channel should also ignore messages from unrelated
// parties.
nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
- assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
// At this point the channel info given by peers should still be the same.
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
// persisted and that its channel info remains the same.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
- assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
// the channel info has updated.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
- assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Send the second half of the original MPP payment.
- nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
assert!(updates.update_fee.is_none());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
}
#[test]
assert!(updates.update_fee.is_none());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "We don't support MPP keysend payments", 1);
}
#[test]
match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() {
PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
- assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) },
+ assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
+ },
_ => panic!("unexpected error")
}
}
match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
Ok(_) => panic!("Unexpected ok"),
Err(()) => {
- nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
}
}
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(channel_id));
}
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
{
// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
}
open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
}
use crate::chain::Listen;
use crate::chain::chainmonitor::{ChainMonitor, Persist};
use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
- use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
+ use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{ChannelMessageHandler, Init};
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{PaymentParameters, get_route};
use crate::util::test_utils;
use crate::util::config::UserConfig;
- use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
} else { panic!(); }
node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
+ let events_b = node_b.get_and_clear_pending_events();
+ assert_eq!(events_b.len(), 1);
+ match events_b[0] {
+ Event::ChannelPending{ ref counterparty_node_id, .. } => {
+ assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
+ let events_a = node_a.get_and_clear_pending_events();
+ assert_eq!(events_a.len(), 1);
+ match events_a[0] {
+ Event::ChannelPending{ ref counterparty_node_id, .. } => {
+ assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
/// Tracks the set of features which a node implements, templated by the context in which it
/// appears.
///
-/// (C-not exported) as we map the concrete feature types below directly instead
+/// This is not exported to bindings users as we map the concrete feature types below directly instead
#[derive(Eq)]
pub struct Features<T: sealed::Context> {
/// Note that, for convenience, flags is LITTLE endian (despite being big-endian on the wire)
/// Create a Features given a set of flags, in little-endian. This is in reverse byte order from
/// most on-the-wire encodings.
- /// (C-not exported) as we don't support export across multiple T
+ ///
+ /// This is not exported to bindings users as we don't support export across multiple T
pub fn from_le_bytes(flags: Vec<u8>) -> Features<T> {
Features {
flags,
use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, keysinterface::EntropySource};
use crate::chain::channelmonitor::ChannelMonitor;
use crate::chain::transaction::OutPoint;
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, PaymentId, MIN_CLTV_EXPIRY_DELTA};
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
use crate::ln::features::InitFeatures;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
-use crate::util::events::ClosureReason;
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::scid_utils;
use crate::util::test_utils;
use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
-use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose};
use crate::util::errors::APIError;
use crate::util::config::UserConfig;
use crate::util::ser::{ReadableArgs, Writeable};
scid
}
/// Mine a single block containing the given transaction
-pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+///
+/// Returns the SCID a channel confirmed in the given transaction will have, assuming the funding
+/// output is the 1st output in the transaction.
+pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> u64 {
let height = node.best_block_info().1 + 1;
- confirm_transaction_at(node, tx, height);
+ confirm_transaction_at(node, tx, height)
}
/// Mine a single block containing the given transactions
pub fn mine_transactions<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn: &[&Transaction]) {
let mut per_peer_state_lock;
let mut peer_state_lock;
let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
- chan.get_feerate()
+ chan.get_feerate_sat_per_1000_weight()
}
}
}
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
}
+ expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id());
node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
{
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
}
+ expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id());
let events_4 = node_a.node.get_and_clear_pending_events();
assert_eq!(events_4.len(), 0);
MessageSendEvent::SendFundingSigned { node_id, msg } => {
assert_eq!(*node_id, initiator.node.get_our_node_id());
initiator.node.handle_funding_signed(&receiver.node.get_our_node_id(), &msg);
+ expect_channel_pending_event(&initiator, &receiver.node.get_our_node_id());
+ expect_channel_pending_event(&receiver, &initiator.node.get_our_node_id());
check_added_monitors!(initiator, 1);
assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
MessageSendEvent::SendChannelReady { node_id, msg } => {
assert_eq!(*node_id, initiator.node.get_our_node_id());
initiator.node.handle_channel_ready(&receiver.node.get_our_node_id(), &msg);
+ expect_channel_ready_event(&initiator, &receiver.node.get_our_node_id());
}
_ => panic!("Unexpected event"),
}
receiver.node.handle_channel_ready(&initiator.node.get_our_node_id(), &as_channel_ready);
+ expect_channel_ready_event(&receiver, &initiator.node.get_our_node_id());
let as_channel_update = get_event_msg!(initiator, MessageSendEvent::SendChannelUpdate, receiver.node.get_our_node_id());
let bs_channel_update = get_event_msg!(receiver, MessageSendEvent::SendChannelUpdate, initiator.node.get_our_node_id());
assert_eq!(initiator.node.list_usable_channels().len(), initiator_channels + 1);
assert_eq!(receiver.node.list_usable_channels().len(), receiver_channels + 1);
- expect_channel_ready_event(&initiator, &receiver.node.get_our_node_id());
- expect_channel_ready_event(&receiver, &initiator.node.get_our_node_id());
-
(tx, as_channel_ready.channel_id)
}
check_added_monitors!(nodes[b], 1);
let cs_funding_signed = get_event_msg!(nodes[b], MessageSendEvent::SendFundingSigned, nodes[a].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[b], &nodes[a].node.get_our_node_id());
+
nodes[a].node.handle_funding_signed(&nodes[b].node.get_our_node_id(), &cs_funding_signed);
+ expect_channel_pending_event(&nodes[a], &nodes[b].node.get_our_node_id());
check_added_monitors!(nodes[a], 1);
+ assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
+ nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+
let conf_height = core::cmp::max(nodes[a].best_block_info().1 + 1, nodes[b].best_block_info().1 + 1);
confirm_transaction_at(&nodes[a], &tx, conf_height);
connect_blocks(&nodes[a], CHAN_CONFIRM_DEPTH - 1);
/// Check that a channel's closing channel update has been broadcasted, and optionally
/// check whether an error message event has occurred.
+/// Returns the error messages received (one per closing channel when
+/// `with_error_msg` is set), in the order their `HandleError` events appeared.
-pub fn check_closed_broadcast(node: &Node, with_error_msg: bool) -> Option<msgs::ErrorMessage> {
+pub fn check_closed_broadcast(node: &Node, num_channels: usize, with_error_msg: bool) -> Vec<msgs::ErrorMessage> {
let msg_events = node.node.get_and_clear_pending_msg_events();
- assert_eq!(msg_events.len(), if with_error_msg { 2 } else { 1 });
- match msg_events[0] {
- MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
- assert_eq!(msg.contents.flags & 2, 2);
- },
- _ => panic!("Unexpected event"),
- }
- if with_error_msg {
- match msg_events[1] {
+ // One BroadcastChannelUpdate per closed channel, plus one HandleError per
+ // channel when an error message is expected.
+ assert_eq!(msg_events.len(), if with_error_msg { num_channels * 2 } else { num_channels });
+ // Check each update carries the disabled bit, and collect any error messages.
+ msg_events.into_iter().filter_map(|msg_event| {
+ match msg_event {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 2);
+ None
+ },
MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+ assert!(with_error_msg);
// TODO: Check node_id
Some(msg.clone())
},
_ => panic!("Unexpected event"),
}
- } else { None }
+ }).collect()
}
/// Check that a channel's closing channel update has been broadcasted, and optionally
#[macro_export]
macro_rules! check_closed_broadcast {
($node: expr, $with_error_msg: expr) => {
- $crate::ln::functional_test_utils::check_closed_broadcast(&$node, $with_error_msg)
+ $crate::ln::functional_test_utils::check_closed_broadcast(&$node, 1, $with_error_msg).pop()
}
}
($events: expr, $expected_failures: expr) => {{
for event in $events {
match event {
- $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
- $crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => {
+ $crate::events::Event::PendingHTLCsForwardable { .. } => { },
+ $crate::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => {
assert!($expected_failures.contains(&failed_next_destination))
},
_ => panic!("Unexpected destination"),
if fail_backwards {
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node_a,
- vec![crate::util::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]);
+ vec![crate::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]);
check_added_monitors!(node_a, 1);
let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap();
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- $crate::util::events::Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id: _, via_user_channel_id: _ } => {
+ $crate::events::Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id: _, via_user_channel_id: _ } => {
assert_eq!($expected_payment_hash, *payment_hash);
assert_eq!($expected_recv_value, amount_msat);
assert_eq!($expected_receiver_node_id, receiver_node_id.unwrap());
match purpose {
- $crate::util::events::PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ $crate::events::PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
assert_eq!(&$expected_payment_preimage, payment_preimage);
assert_eq!($expected_payment_secret, *payment_secret);
},
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- $crate::util::events::Event::PaymentClaimed { ref payment_hash, amount_msat, .. } => {
+ $crate::events::Event::PaymentClaimed { ref payment_hash, amount_msat, .. } => {
assert_eq!($expected_payment_hash, *payment_hash);
assert_eq!($expected_recv_value, amount_msat);
},
assert_eq!(events.len(), 1);
}
let expected_payment_id = match events[0] {
- $crate::util::events::Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => {
+ $crate::events::Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => {
assert_eq!($expected_payment_preimage, *payment_preimage);
assert_eq!(expected_payment_hash, *payment_hash);
assert!(fee_paid_msat.is_some());
if $expect_paths {
for i in 1..events.len() {
match events[i] {
- $crate::util::events::Event::PaymentPathSuccessful { payment_id, payment_hash, .. } => {
+ $crate::events::Event::PaymentPathSuccessful { payment_id, payment_hash, .. } => {
assert_eq!(payment_id, expected_payment_id);
assert_eq!(payment_hash, Some(expected_payment_hash));
},
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- $crate::util::events::Event::PaymentPathSuccessful { .. } => {},
+ $crate::events::Event::PaymentPathSuccessful { .. } => {},
_ => panic!("Unexpected event"),
}
}
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
+ Event::PaymentForwarded {
+ fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
+ outbound_amount_forwarded_msat: _
+ } => {
assert_eq!(fee_earned_msat, $expected_fee);
if fee_earned_msat.is_some() {
// Is the event prev_channel_id in one of the channels between the two nodes?
}
#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
-pub fn expect_channel_ready_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
+// Consumes the node's single pending event, asserting it is a `ChannelPending`
+// whose counterparty matches `expected_counterparty_node_id`; panics otherwise.
+pub fn expect_channel_pending_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- crate::util::events::Event::ChannelReady{ ref counterparty_node_id, .. } => {
+ crate::events::Event::ChannelPending { ref counterparty_node_id, .. } => {
assert_eq!(*expected_counterparty_node_id, *counterparty_node_id);
},
_ => panic!("Unexpected event"),
}
}
+#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+// Consumes the node's single pending event, asserting it is a `ChannelReady`
+// whose counterparty matches `expected_counterparty_node_id`; panics otherwise.
+pub fn expect_channel_ready_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
+ let events = node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ crate::events::Event::ChannelReady{ ref counterparty_node_id, .. } => {
+ assert_eq!(*expected_counterparty_node_id, *counterparty_node_id);
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
pub struct PaymentFailedConditions<'a> {
pub(crate) expected_htlc_error_data: Option<(u16, &'a [u8])>,
) {
if conditions.expected_mpp_parts_remain { assert_eq!(payment_failed_events.len(), 1); } else { assert_eq!(payment_failed_events.len(), 2); }
let expected_payment_id = match &payment_failed_events[0] {
- Event::PaymentPathFailed { payment_hash, payment_failed_permanently, path, retry, payment_id, failure, short_channel_id,
+ Event::PaymentPathFailed { payment_hash, payment_failed_permanently, payment_id, failure,
#[cfg(test)]
error_code,
#[cfg(test)]
error_data, .. } => {
assert_eq!(*payment_hash, expected_payment_hash, "unexpected payment_hash");
assert_eq!(*payment_failed_permanently, expected_payment_failed_permanently, "unexpected payment_failed_permanently value");
- assert!(retry.is_some(), "expected retry.is_some()");
- assert_eq!(retry.as_ref().unwrap().final_value_msat, path.last().unwrap().fee_msat, "Retry amount should match last hop in path");
- assert_eq!(retry.as_ref().unwrap().payment_params.payee_pubkey, path.last().unwrap().pubkey, "Retry payee node_id should match last hop in path");
- if let Some(scid) = short_channel_id {
- assert!(retry.as_ref().unwrap().payment_params.previously_failed_channels.contains(&scid));
- }
-
#[cfg(test)]
{
assert!(error_code.is_some(), "expected error_code.is_some() = true");
use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::{ChannelSigner, EcdsaChannelSigner, EntropySource};
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
+use crate::util::string::UntrustedString;
use crate::util::config::UserConfig;
use bitcoin::hash_types::BlockHash;
if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
match action {
&ErrorAction::SendErrorMessage { .. } => {
- nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), expected_regex, 1);
+ nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
},
_ => panic!("unexpected event!"),
}
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
}
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
if steps & 0x0f == 5 { return; }
added_monitors.clear();
}
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let events_4 = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events_4.len(), 0);
let commit_signed_msg = msgs::CommitmentSigned {
channel_id: chan.2,
signature: res.0,
- htlc_signatures: res.1
+ htlc_signatures: res.1,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
};
let update_fee = msgs::UpdateFee {
unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
}
// This should also be true if we try to forward a payment.
_ => panic!("Unexpected error variant"),
}
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1);
send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
}
let commit_signed_msg = msgs::CommitmentSigned {
channel_id: chan.2,
signature: res.0,
- htlc_signatures: res.1
+ htlc_signatures: res.1,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
};
// Send the commitment_signed message to the nodes[1].
let raa_msg = msgs::RevokeAndACK {
channel_id: chan.2,
per_commitment_secret: local_secret,
- next_per_commitment_point: next_local_point
+ next_per_commitment_point: next_local_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
};
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
}
// channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 2);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2);
}
let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
}
let chan_id = Some(chan_1.2);
match forwarded_events[1] {
- Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
+ Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
assert_eq!(fee_earned_msat, Some(1000));
assert_eq!(prev_channel_id, chan_id);
assert_eq!(claim_from_onchain_tx, true);
assert_eq!(next_channel_id, Some(chan_2.2));
+ assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
},
_ => panic!()
}
match forwarded_events[2] {
- Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
+ Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
assert_eq!(fee_earned_msat, Some(1000));
assert_eq!(prev_channel_id, chan_id);
assert_eq!(claim_from_onchain_tx, true);
assert_eq!(next_channel_id, Some(chan_2.2));
+ assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
},
_ => panic!()
}
assert_eq!(commitment_spend.input.len(), 2);
assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
- assert_eq!(commitment_spend.lock_time.0, 0);
+ assert_eq!(commitment_spend.lock_time.0, nodes[1].best_block_info().1 + 1);
assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
// We don't bother to check that B can claim the HTLC output on its commitment tx here as
// we already checked the same situation with A.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
let payment_id = PaymentId([42; 32]);
let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &route).unwrap();
- nodes[0].node.test_send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
+ Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
assert_eq!(fee_earned_msat, Some(1000));
assert_eq!(prev_channel_id, Some(chan_1.2));
assert_eq!(claim_from_onchain_tx, true);
assert_eq!(next_channel_id, Some(chan_2.2));
+ assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
},
_ => panic!("Unexpected event"),
}
check_spends!(b_txn[0], commitment_tx[0]);
assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
- assert_eq!(b_txn[0].lock_time.0, 0); // Success tx
+ assert_eq!(b_txn[0].lock_time.0, nodes[1].best_block_info().1 + 1); // Success tx
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1);
}
#[test]
assert_eq!(err, "Cannot send 0-msat HTLC"));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
}
#[test]
assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
}
#[test]
assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
}
if !revoked {
assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
} else {
- assert_eq!(timeout_tx[0].lock_time.0, 0);
+ assert_eq!(timeout_tx[0].lock_time.0, 12);
}
// We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
mine_transaction(&nodes[0], &timeout_tx[0]);
}
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
- &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
+ &msgs::RevokeAndACK {
+ channel_id,
+ per_commitment_secret,
+ next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ });
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
}
}
+#[test]
+fn test_onion_value_mpp_set_calculation() {
+ // Test that we use the onion value `amt_to_forward` when
+ // calculating whether we've reached the `total_msat` of an MPP
+ // by having a routing node forward more than `amt_to_forward`
+ // and checking that the receiving node doesn't generate
+ // a PaymentClaimable event too early
+ let node_count = 4;
+ let chanmon_cfgs = create_chanmon_cfgs(node_count);
+ let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
+ let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
+
+ // Build a diamond topology 0 -> {1, 2} -> 3 so the payment can be split
+ // across two two-hop paths.
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
+
+ let total_msat = 100_000;
+ let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
+ let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
+ let sample_path = route.paths.pop().unwrap();
+
+ // Path via node 1: carries the full total_msat as its final-hop value.
+ let mut path_1 = sample_path.clone();
+ path_1[0].pubkey = nodes[1].node.get_our_node_id();
+ path_1[0].short_channel_id = chan_1_id;
+ path_1[1].pubkey = nodes[3].node.get_our_node_id();
+ path_1[1].short_channel_id = chan_3_id;
+ path_1[1].fee_msat = 100_000;
+ route.paths.push(path_1);
+
+ // Path via node 2: nominally 1_000 msat (delivered as 101_000 below).
+ let mut path_2 = sample_path.clone();
+ path_2[0].pubkey = nodes[2].node.get_our_node_id();
+ path_2[0].short_channel_id = chan_2_id;
+ path_2[1].pubkey = nodes[3].node.get_our_node_id();
+ path_2[1].short_channel_id = chan_4_id;
+ path_2[1].fee_msat = 1_000;
+ route.paths.push(path_2);
+
+ // Send payment
+ let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+ let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &route).unwrap();
+ nodes[0].node.test_send_payment_internal(&route, our_payment_hash, &Some(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
+ check_added_monitors!(nodes[0], expected_paths.len());
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), expected_paths.len());
+
+ // First path
+ let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
+ let mut payment_event = SendEvent::from_event(ev);
+ let mut prev_node = &nodes[0];
+
+ for (idx, &node) in expected_paths[0].iter().enumerate() {
+ assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
+
+ if idx == 0 { // routing node
+ let session_priv = [3; 32];
+ let height = nodes[0].best_block_info().1;
+ let session_priv = SecretKey::from_slice(&session_priv).unwrap();
+ let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+ let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000, &Some(our_payment_secret), height + 1, &None).unwrap();
+ // Edit amt_to_forward to simulate the sender having set
+ // the final amount and the routing node taking less fee
+ onion_payloads[1].amt_to_forward = 99_000;
+ let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
+ payment_event.msgs[0].onion_routing_packet = new_onion_packet;
+ }
+
+ node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(node, 0);
+ commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(node);
+
+ if idx == 0 {
+ let mut events_2 = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ check_added_monitors!(node, 1);
+ payment_event = SendEvent::from_event(events_2.remove(0));
+ assert_eq!(payment_event.msgs.len(), 1);
+ } else {
+ // Receiver must not generate PaymentClaimable yet - the second path
+ // is still outstanding.
+ let events_2 = node.node.get_and_clear_pending_events();
+ assert!(events_2.is_empty());
+ }
+
+ prev_node = node;
+ }
+
+ // Second path
+ let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
+
+ claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
+}
+
+// Sends an MPP over one two-hop path per entry in `msat_amounts` (source ->
+// routing node i -> destination), asserting the PaymentClaimable event fires
+// only on the path whose cumulative delivered amount first reaches
+// `total_msat`, then claims the payment along all paths.
+fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
+
+ let routing_node_count = msat_amounts.len();
+ let node_count = routing_node_count + 2;
+
+ let chanmon_cfgs = create_chanmon_cfgs(node_count);
+ let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
+ let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
+
+ let src_idx = 0;
+ let dst_idx = 1;
+
+ // Create channels for each amount
+ let mut expected_paths = Vec::with_capacity(routing_node_count);
+ let mut src_chan_ids = Vec::with_capacity(routing_node_count);
+ let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
+ for i in 0..routing_node_count {
+ let routing_node = 2 + i;
+ let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
+ src_chan_ids.push(src_chan_id);
+ let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
+ dst_chan_ids.push(dst_chan_id);
+ let path = vec![&nodes[routing_node], &nodes[dst_idx]];
+ expected_paths.push(path);
+ }
+ let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
+
+ // Create a route for each amount
+ let example_amount = 100000;
+ let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
+ let sample_path = route.paths.pop().unwrap();
+ for i in 0..routing_node_count {
+ let routing_node = 2 + i;
+ let mut path = sample_path.clone();
+ path[0].pubkey = nodes[routing_node].node.get_our_node_id();
+ path[0].short_channel_id = src_chan_ids[i];
+ path[1].pubkey = nodes[dst_idx].node.get_our_node_id();
+ path[1].short_channel_id = dst_chan_ids[i];
+ path[1].fee_msat = msat_amounts[i];
+ route.paths.push(path);
+ }
+
+ // Send payment with manually set total_msat
+ let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
+ let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &route).unwrap();
+ nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash, &Some(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
+ check_added_monitors!(nodes[src_idx], expected_paths.len());
+
+ let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), expected_paths.len());
+ let mut amount_received = 0;
+ for (path_idx, expected_path) in expected_paths.iter().enumerate() {
+ let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
+
+ let current_path_amount = msat_amounts[path_idx];
+ amount_received += current_path_amount;
+ // Claimable exactly when the running total first crosses total_msat.
+ let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
+ pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
+ }
+
+ claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
+}
+
+#[test]
+fn test_overshoot_mpp() {
+ // Path amounts sum to 201k and 210k msat respectively - both overshoot
+ // the 200k total.
+ do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
+ do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
+}
+
#[test]
fn test_simple_mpp() {
// Simple test of sending a multi-path payment.
let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
- check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() }, true);
+ check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
}
#[test]
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
}
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
}
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let events_4 = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events_4.len(), 0);
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], false);
- check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 2);
- check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
+ check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
match events[0] {
nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let events_1 = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events_1.len(), 0);
if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
});
- let payment_params_opt = Some(payment_params);
let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
dup_route.paths.push(route.paths[1].clone());
nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &dup_route).unwrap()
};
- nodes[0].node.test_send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
{
}
assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
- nodes[0].node.test_send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
{
expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
- nodes[0].node.test_send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
+ nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
check_added_monitors!(nodes[0], 1);
+ expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
}
nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);
- nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
}
let _ = nodes[0].node.get_and_clear_pending_msg_events();
// You may not use this file except in accordance with one or both of these
// licenses.
-//! High level lightning structs and impls live here.
-//!
-//! You probably want to create a [`ChannelManager`], and a [`P2PGossipSync`] first.
-//! Then, you probably want to pass them both on to a peer_handler::PeerManager and use that to
-//! create/manage connections and call get_and_clear_pending_events after each action, handling
-//! them appropriately.
-//!
-//! When you want to open/close a channel or send a payment, call into your [`ChannelManager`] and
-//! when you want to learn things about the network topology (eg get a route for sending a payment),
-//! call into your [`P2PGossipSync`].
-//!
-//! [`ChannelManager`]: channelmanager::ChannelManager
-//! [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
+//! Implementations of various parts of the Lightning protocol are in this module.
#[cfg(any(test, feature = "_test_utils"))]
#[macro_use]
pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN;
/// payment_hash type, use to cross-lock hop
-/// (C-not exported) as we just use [u8; 32] directly
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentHash(pub [u8; 32]);
/// payment_preimage type, use to route payment between hop
-/// (C-not exported) as we just use [u8; 32] directly
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentPreimage(pub [u8; 32]);
/// payment_secret type, use to authenticate sender to the receiver and tie MPP HTLCs together
-/// (C-not exported) as we just use [u8; 32] directly
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentSecret(pub [u8; 32]);
//! Further functional tests which test blockchain reorganizations.
#[cfg(anchors)]
-use crate::chain::keysinterface::BaseSign;
+use crate::chain::keysinterface::{ChannelSigner, EcdsaChannelSigner};
#[cfg(anchors)]
use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, Balance};
use crate::chain::transaction::OutPoint;
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
+#[cfg(anchors)]
+use crate::events::bump_transaction::BumpTransactionEvent;
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::ln::channel;
#[cfg(anchors)]
use crate::ln::chan_utils;
+#[cfg(anchors)]
+use crate::ln::channelmanager::ChannelManager;
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId};
use crate::ln::msgs::ChannelMessageHandler;
#[cfg(anchors)]
use crate::util::config::UserConfig;
#[cfg(anchors)]
-use crate::util::events::BumpTransactionEvent;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::util::crypto::sign;
+#[cfg(anchors)]
+use crate::util::ser::Writeable;
+#[cfg(anchors)]
+use crate::util::test_utils;
+#[cfg(anchors)]
+use bitcoin::blockdata::transaction::EcdsaSighashType;
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
use bitcoin::secp256k1::Secp256k1;
#[cfg(anchors)]
-use bitcoin::{Amount, Script, TxIn, TxOut, PackedLockTime};
+use bitcoin::secp256k1::SecretKey;
+#[cfg(anchors)]
+use bitcoin::{Amount, PublicKey, Script, TxIn, TxOut, PackedLockTime, Witness};
use bitcoin::Transaction;
+#[cfg(anchors)]
+use bitcoin::util::sighash::SighashCache;
use crate::prelude::*;
let mut holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
// Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the
- // best block is being updated prior to the confirmed transactions.
+ // best block is updated before the confirmed transactions are notified.
match *nodes[0].connect_style.borrow() {
ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::BestBlockFirstSkippingBlocks => {
assert_eq!(holder_events.len(), 3);
let mut htlc_txs = Vec::with_capacity(2);
for event in holder_events {
match event {
- Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, .. }) => {
+ Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, tx_lock_time, .. }) => {
assert_eq!(htlc_descriptors.len(), 1);
let htlc_descriptor = &htlc_descriptors[0];
let signer = nodes[0].keys_manager.derive_channel_keys(
let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp);
let mut htlc_tx = Transaction {
version: 2,
- lock_time: if htlc_descriptor.htlc.offered {
- PackedLockTime(htlc_descriptor.htlc.cltv_expiry)
- } else {
- PackedLockTime::ZERO
- },
+ lock_time: tx_lock_time,
input: vec![
htlc_descriptor.unsigned_tx_input(), // HTLC input
TxIn { ..Default::default() } // Fee input
// Clear the remaining events as they're not relevant to what we're testing.
nodes[0].node.get_and_clear_pending_events();
}
+
+#[cfg(anchors)]
+#[test]
+fn test_anchors_aggregated_revoked_htlc_tx() {
+ // Test that `ChannelMonitor`s can properly detect and claim funds from a counterparty claiming
+ // multiple HTLCs from multiple channels in a single transaction via the success path from a
+ // revoked commitment.
+ let secp = Secp256k1::new();
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ // Required to sign a revoked commitment transaction
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut anchors_config = UserConfig::default();
+ anchors_config.channel_handshake_config.announced_channel = true;
+ anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config), Some(anchors_config)]);
+
+ let bob_persister: test_utils::TestPersister;
+ let bob_chain_monitor: test_utils::TestChainMonitor;
+ let bob_deserialized: ChannelManager<
+ &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface,
+ &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator,
+ &test_utils::TestRouter, &test_utils::TestLogger,
+ >;
+
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
+ let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
+
+ // Serialize Bob with the initial state of both channels, which we'll use later.
+ let bob_serialized = nodes[1].node.encode();
+
+ // Route two payments for each channel from Alice to Bob to lock in the HTLCs.
+ let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
+ let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
+ let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
+ let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
+
+ // Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
+ // at this point such that he broadcasts a revoked commitment transaction with the HTLCs
+ // present.
+ let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
+ let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
+
+ // Bob claims all the HTLCs...
+ claim_payment(&nodes[0], &[&nodes[1]], payment_a.0);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_b.0);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_c.0);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_d.0);
+
+ // ...and sends one back through each channel such that he has a motive to broadcast his
+ // revoked state.
+ send_payment(&nodes[1], &[&nodes[0]], 30_000_000);
+ send_payment(&nodes[1], &[&nodes[0]], 30_000_000);
+
+ // Restart Bob with the revoked state and provide the HTLC preimages he claimed.
+ reload_node!(
+ nodes[1], anchors_config, bob_serialized, &[&bob_serialized_monitor_a, &bob_serialized_monitor_b],
+ bob_persister, bob_chain_monitor, bob_deserialized
+ );
+ for chan_id in [chan_a.2, chan_b.2].iter() {
+ let monitor = get_monitor!(nodes[1], chan_id);
+ for payment in [payment_a, payment_b, payment_c, payment_d].iter() {
+ monitor.provide_payment_preimage(
+ &payment.1, &payment.0, &node_cfgs[1].tx_broadcaster,
+ &LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger
+ );
+ }
+ }
+
+ // Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
+ // broadcast the latest commitment transaction known to them, which in our case is the one with
+ // the HTLCs still pending.
+ nodes[1].node.timer_tick_occurred();
+ check_added_monitors(&nodes[1], 2);
+ check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
+ let (revoked_commitment_a, revoked_commitment_b) = {
+ let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(txn.len(), 2);
+ assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+ assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+ if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+ check_spends!(&txn[0], &chan_a.3);
+ check_spends!(&txn[1], &chan_b.3);
+ (txn[0].clone(), txn[1].clone())
+ } else {
+ check_spends!(&txn[1], &chan_a.3);
+ check_spends!(&txn[0], &chan_b.3);
+ (txn[1].clone(), txn[0].clone())
+ }
+ };
+
+ // Bob should now receive two events to bump his revoked commitment transaction fees.
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ let anchor_tx = {
+ let secret_key = SecretKey::from_slice(&[1; 32]).unwrap();
+ let public_key = PublicKey::new(secret_key.public_key(&secp));
+ let fee_utxo_script = Script::new_v0_p2wpkh(&public_key.wpubkey_hash().unwrap());
+ let coinbase_tx = Transaction {
+ version: 2,
+ lock_time: PackedLockTime::ZERO,
+ input: vec![TxIn { ..Default::default() }],
+ output: vec![TxOut { // UTXO to attach fees to `anchor_tx`
+ value: Amount::ONE_BTC.to_sat(),
+ script_pubkey: fee_utxo_script.clone(),
+ }],
+ };
+ let mut anchor_tx = Transaction {
+ version: 2,
+ lock_time: PackedLockTime::ZERO,
+ input: vec![
+ TxIn { // Fee input
+ previous_output: bitcoin::OutPoint { txid: coinbase_tx.txid(), vout: 0 },
+ ..Default::default()
+ },
+ ],
+ output: vec![TxOut { // Fee input change
+ value: coinbase_tx.output[0].value / 2 ,
+ script_pubkey: Script::new_op_return(&[]),
+ }],
+ };
+ let mut signers = Vec::with_capacity(2);
+ for event in events {
+ match event {
+ Event::BumpTransaction(BumpTransactionEvent::ChannelClose { anchor_descriptor, .. }) => {
+ anchor_tx.input.push(TxIn {
+ previous_output: anchor_descriptor.outpoint,
+ ..Default::default()
+ });
+ let signer = nodes[1].keys_manager.derive_channel_keys(
+ anchor_descriptor.channel_value_satoshis, &anchor_descriptor.channel_keys_id,
+ );
+ signers.push(signer);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ for (i, signer) in signers.into_iter().enumerate() {
+ let anchor_idx = i + 1;
+ let funding_sig = signer.sign_holder_anchor_input(&mut anchor_tx, anchor_idx, &secp).unwrap();
+ anchor_tx.input[anchor_idx].witness = chan_utils::build_anchor_input_witness(
+ &signer.pubkeys().funding_pubkey, &funding_sig
+ );
+ }
+ let fee_utxo_sig = {
+ let witness_script = Script::new_p2pkh(&public_key.pubkey_hash());
+ let sighash = hash_to_message!(&SighashCache::new(&anchor_tx).segwit_signature_hash(
+ 0, &witness_script, coinbase_tx.output[0].value, EcdsaSighashType::All
+ ).unwrap()[..]);
+ let sig = sign(&secp, &sighash, &secret_key);
+ let mut sig = sig.serialize_der().to_vec();
+ sig.push(EcdsaSighashType::All as u8);
+ sig
+ };
+ anchor_tx.input[0].witness = Witness::from_vec(vec![fee_utxo_sig, public_key.to_bytes()]);
+ check_spends!(anchor_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
+ anchor_tx
+ };
+
+ for node in &nodes {
+ mine_transactions(node, &[&revoked_commitment_a, &revoked_commitment_b, &anchor_tx]);
+ }
+ check_added_monitors!(&nodes[0], 2);
+ check_closed_broadcast(&nodes[0], 2, true);
+ check_closed_event!(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed);
+
+ // Alice should detect the confirmed revoked commitments, and attempt to claim all of the
+ // revoked outputs.
+ {
+ let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(txn.len(), 2);
+
+ let (revoked_claim_a, revoked_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
+ (&txn[0], &txn[1])
+ } else {
+ (&txn[1], &txn[0])
+ };
+
+ // TODO: to_self claim must be separate from HTLC claims
+ assert_eq!(revoked_claim_a.input.len(), 3); // Spends both HTLC outputs and to_self output
+ assert_eq!(revoked_claim_a.output.len(), 1);
+ check_spends!(revoked_claim_a, revoked_commitment_a);
+ assert_eq!(revoked_claim_b.input.len(), 3); // Spends both HTLC outputs and to_self output
+ assert_eq!(revoked_claim_b.output.len(), 1);
+ check_spends!(revoked_claim_b, revoked_commitment_b);
+ }
+
+ // Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
+ // through the success path.
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
+ // Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the
+ // best block is updated before the confirmed transactions are notified.
+ match *nodes[1].connect_style.borrow() {
+ ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::BestBlockFirstSkippingBlocks => {
+ assert_eq!(events.len(), 4);
+ if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. }) = events.remove(0) {}
+ else { panic!("unexpected event"); }
+ if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. }) = events.remove(1) {}
+ else { panic!("unexpected event"); }
+
+ },
+ _ => assert_eq!(events.len(), 2),
+ };
+ let htlc_tx = {
+ let secret_key = SecretKey::from_slice(&[1; 32]).unwrap();
+ let public_key = PublicKey::new(secret_key.public_key(&secp));
+ let fee_utxo_script = Script::new_v0_p2wpkh(&public_key.wpubkey_hash().unwrap());
+ let coinbase_tx = Transaction {
+ version: 2,
+ lock_time: PackedLockTime::ZERO,
+ input: vec![TxIn { ..Default::default() }],
+ output: vec![TxOut { // UTXO to attach fees to `htlc_tx`
+ value: Amount::ONE_BTC.to_sat(),
+ script_pubkey: fee_utxo_script.clone(),
+ }],
+ };
+ let mut htlc_tx = Transaction {
+ version: 2,
+ lock_time: PackedLockTime::ZERO,
+ input: vec![TxIn { // Fee input
+ previous_output: bitcoin::OutPoint { txid: coinbase_tx.txid(), vout: 0 },
+ ..Default::default()
+ }],
+ output: vec![TxOut { // Fee input change
+ value: coinbase_tx.output[0].value / 2 ,
+ script_pubkey: Script::new_op_return(&[]),
+ }],
+ };
+ let mut descriptors = Vec::with_capacity(4);
+ for event in events {
+ if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { mut htlc_descriptors, tx_lock_time, .. }) = event {
+ assert_eq!(htlc_descriptors.len(), 2);
+ for htlc_descriptor in &htlc_descriptors {
+ assert!(!htlc_descriptor.htlc.offered);
+ let signer = nodes[1].keys_manager.derive_channel_keys(
+ htlc_descriptor.channel_value_satoshis, &htlc_descriptor.channel_keys_id
+ );
+ let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp);
+ htlc_tx.input.push(htlc_descriptor.unsigned_tx_input());
+ htlc_tx.output.push(htlc_descriptor.tx_output(&per_commitment_point, &secp));
+ }
+ descriptors.append(&mut htlc_descriptors);
+ htlc_tx.lock_time = tx_lock_time;
+ } else {
+ panic!("Unexpected event");
+ }
+ }
+ for (idx, htlc_descriptor) in descriptors.into_iter().enumerate() {
+ let htlc_input_idx = idx + 1;
+ let signer = nodes[1].keys_manager.derive_channel_keys(
+ htlc_descriptor.channel_value_satoshis, &htlc_descriptor.channel_keys_id
+ );
+ let our_sig = signer.sign_holder_htlc_transaction(&htlc_tx, htlc_input_idx, &htlc_descriptor, &secp).unwrap();
+ let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp);
+ let witness_script = htlc_descriptor.witness_script(&per_commitment_point, &secp);
+ htlc_tx.input[htlc_input_idx].witness = htlc_descriptor.tx_input_witness(&our_sig, &witness_script);
+ }
+ let fee_utxo_sig = {
+ let witness_script = Script::new_p2pkh(&public_key.pubkey_hash());
+ let sighash = hash_to_message!(&SighashCache::new(&htlc_tx).segwit_signature_hash(
+ 0, &witness_script, coinbase_tx.output[0].value, EcdsaSighashType::All
+ ).unwrap()[..]);
+ let sig = sign(&secp, &sighash, &secret_key);
+ let mut sig = sig.serialize_der().to_vec();
+ sig.push(EcdsaSighashType::All as u8);
+ sig
+ };
+ htlc_tx.input[0].witness = Witness::from_vec(vec![fee_utxo_sig, public_key.to_bytes()]);
+ check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
+ htlc_tx
+ };
+
+ for node in &nodes {
+ mine_transaction(node, &htlc_tx);
+ }
+
+ // Alice should see that Bob is trying to claim to HTLCs, so she should now try to claim them at
+ // the second level instead.
+ let revoked_claims = {
+ let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(txn.len(), 4);
+
+ let revoked_to_self_claim_a = txn.iter().find(|tx|
+ tx.input.len() == 1 &&
+ tx.output.len() == 1 &&
+ tx.input[0].previous_output.txid == revoked_commitment_a.txid()
+ ).unwrap();
+ check_spends!(revoked_to_self_claim_a, revoked_commitment_a);
+
+ let revoked_to_self_claim_b = txn.iter().find(|tx|
+ tx.input.len() == 1 &&
+ tx.output.len() == 1 &&
+ tx.input[0].previous_output.txid == revoked_commitment_b.txid()
+ ).unwrap();
+ check_spends!(revoked_to_self_claim_b, revoked_commitment_b);
+
+ let revoked_htlc_claims = txn.iter().filter(|tx|
+ tx.input.len() == 2 &&
+ tx.output.len() == 1 &&
+ tx.input[0].previous_output.txid == htlc_tx.txid()
+ ).collect::<Vec<_>>();
+ assert_eq!(revoked_htlc_claims.len(), 2);
+ for revoked_htlc_claim in revoked_htlc_claims {
+ check_spends!(revoked_htlc_claim, htlc_tx);
+ }
+
+ txn
+ };
+ for node in &nodes {
+ mine_transactions(node, &revoked_claims.iter().collect::<Vec<_>>());
+ }
+
+
+ // Connect one block to make sure the HTLC events are not yielded while ANTI_REORG_DELAY has not
+ // been reached.
+ connect_blocks(&nodes[0], 1);
+ connect_blocks(&nodes[1], 1);
+
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+ // Connect the remaining blocks to reach ANTI_REORG_DELAY.
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ let spendable_output_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
+ assert_eq!(spendable_output_events.len(), 4);
+ for (idx, event) in spendable_output_events.iter().enumerate() {
+ if let Event::SpendableOutputs { outputs } = event {
+ assert_eq!(outputs.len(), 1);
+ let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(
+ &[&outputs[0]], Vec::new(), Script::new_op_return(&[]), 253, &Secp256k1::new(),
+ ).unwrap();
+ check_spends!(spend_tx, revoked_claims[idx]);
+ } else {
+ panic!("unexpected event");
+ }
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ assert!(nodes[1].node.list_channels().is_empty());
+ assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty());
+ // TODO: From Bob's PoV, he still thinks he can claim the outputs from his revoked commitment.
+ // This needs to be fixed before we enable pruning `ChannelMonitor`s once they don't have any
+ // balances to claim.
+ //
+ // The 6 claimable balances correspond to his `to_self` outputs and the 2 HTLC outputs in each
+ // revoked commitment which Bob has the preimage for.
+ assert_eq!(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]).len(), 6);
+}
use crate::io::{self, Read};
use crate::io_extras::read_to_end;
-use crate::util::events::{MessageSendEventsProvider, OnionMessageProvider};
+use crate::events::{MessageSendEventsProvider, OnionMessageProvider};
use crate::util::logger;
use crate::util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname};
/// 21 million * 10^8 * 1000
pub(crate) const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;
+#[cfg(taproot)]
+/// A partial signature that also contains the Musig2 nonce its signer used
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct PartialSignatureWithNonce(pub musig2::types::PartialSignature, pub musig2::types::PublicNonce);
+
/// An error in decoding a message or struct.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum DecodeError {
/// our feature bits with our counterparty's feature bits from the [`Init`] message.
/// This is required to match the equivalent field in [`OpenChannel::channel_type`].
pub channel_type: Option<ChannelTypeFeatures>,
+ #[cfg(taproot)]
+ /// Next nonce the channel initiator should use to create a funding output signature against
+ pub next_local_nonce: Option<musig2::types::PublicNonce>,
}
/// A [`funding_created`] message to be sent to or received from a peer.
pub funding_output_index: u16,
/// The signature of the channel initiator (funder) on the initial commitment transaction
pub signature: Signature,
+ #[cfg(taproot)]
+ /// The partial signature of the channel initiator (funder)
+ pub partial_signature_with_nonce: Option<PartialSignatureWithNonce>,
+ #[cfg(taproot)]
+ /// Next nonce the channel acceptor should use to finalize the funding output signature
+ pub next_local_nonce: Option<musig2::types::PublicNonce>
}
/// A [`funding_signed`] message to be sent to or received from a peer.
pub channel_id: [u8; 32],
/// The signature of the channel acceptor (fundee) on the initial commitment transaction
pub signature: Signature,
+ #[cfg(taproot)]
+ /// The partial signature of the channel acceptor (fundee)
+ pub partial_signature_with_nonce: Option<PartialSignatureWithNonce>,
}
/// A [`channel_ready`] message to be sent to or received from a peer.
pub signature: Signature,
/// Signatures on the HTLC transactions
pub htlc_signatures: Vec<Signature>,
+ #[cfg(taproot)]
+ /// The partial Taproot signature on the commitment transaction
+ pub partial_signature_with_nonce: Option<PartialSignatureWithNonce>,
}
/// A [`revoke_and_ack`] message to be sent to or received from a peer.
pub per_commitment_secret: [u8; 32],
/// The next sender-broadcast commitment transaction's per-commitment point
pub next_per_commitment_point: PublicKey,
+ #[cfg(taproot)]
+ /// Musig nonce the recipient should use in their next commitment signature message
+ pub next_local_nonce: Option<musig2::types::PublicNonce>
}
/// An [`update_fee`] message to be sent to or received from a peer
/// [`OptionalField`] simply gets `Present` if there are enough bytes to read into it), we have a
/// separate enum type for them.
///
-/// (C-not exported) due to a free generic in `T`
+/// This is not exported to bindings users due to a free generic in `T`
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum OptionalField<T> {
/// Optional field is included in message
}
}
+#[cfg(not(taproot))]
+impl_writeable_msg!(AcceptChannel, {
+ temporary_channel_id,
+ dust_limit_satoshis,
+ max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis,
+ htlc_minimum_msat,
+ minimum_depth,
+ to_self_delay,
+ max_accepted_htlcs,
+ funding_pubkey,
+ revocation_basepoint,
+ payment_point,
+ delayed_payment_basepoint,
+ htlc_basepoint,
+ first_per_commitment_point,
+ shutdown_scriptpubkey
+}, {
+ (1, channel_type, option),
+});
+#[cfg(taproot)]
impl_writeable_msg!(AcceptChannel, {
temporary_channel_id,
dust_limit_satoshis,
shutdown_scriptpubkey
}, {
(1, channel_type, option),
+ (4, next_local_nonce, option),
});
impl_writeable_msg!(AnnouncementSignatures, {
max_fee_satoshis
});
+#[cfg(not(taproot))]
impl_writeable_msg!(CommitmentSigned, {
channel_id,
signature,
htlc_signatures
}, {});
+#[cfg(taproot)]
+impl_writeable_msg!(CommitmentSigned, {
+ channel_id,
+ signature,
+ htlc_signatures
+}, {
+ (2, partial_signature_with_nonce, option)
+});
+
impl_writeable!(DecodedOnionErrorPacket, {
hmac,
failuremsg,
pad
});
+#[cfg(not(taproot))]
impl_writeable_msg!(FundingCreated, {
temporary_channel_id,
funding_txid,
funding_output_index,
signature
}, {});
+#[cfg(taproot)]
+impl_writeable_msg!(FundingCreated, {
+ temporary_channel_id,
+ funding_txid,
+ funding_output_index,
+ signature
+}, {
+ (2, partial_signature_with_nonce, option),
+ (4, next_local_nonce, option)
+});
+#[cfg(not(taproot))]
impl_writeable_msg!(FundingSigned, {
channel_id,
signature
}, {});
+#[cfg(taproot)]
+impl_writeable_msg!(FundingSigned, {
+ channel_id,
+ signature
+}, {
+ (2, partial_signature_with_nonce, option)
+});
+
impl_writeable_msg!(ChannelReady, {
channel_id,
next_per_commitment_point,
(1, channel_type, option),
});
+#[cfg(not(taproot))]
impl_writeable_msg!(RevokeAndACK, {
channel_id,
per_commitment_secret,
next_per_commitment_point
}, {});
+#[cfg(taproot)]
+impl_writeable_msg!(RevokeAndACK, {
+ channel_id,
+ per_commitment_secret,
+ next_per_commitment_point
+}, {
+ (4, next_local_nonce, option)
+});
+
impl_writeable_msg!(Shutdown, {
channel_id,
scriptpubkey
first_per_commitment_point: pubkey_6,
shutdown_scriptpubkey: if shutdown { OptionalField::Present(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { OptionalField::Absent },
channel_type: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
};
let encoded_value = accept_channel.encode();
let mut target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020212345678901234562334032891223698321446687011447600083a840000034d000c89d4c0bcc0bc031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f024d4b6cd1361032ca9bd2aeb9d900aa4d45d9ead80ac9423374c451a7254d076602531fe6068134503d2723133227c867ac8fa6c83c537e9a44c3c5bdbdcb1fe33703462779ad4aad39514614751a71085f2f10e1c7a593e4e030efb5b8721ce55b0b0362c0a046dacce86ddd0343c6d3c7c79c2208ba0d9c9cf24a6d046d21d21f90f703f006a18d5653c4edf5391ff23a61f03ff83d237e880ee61187fa9f379a028e0a").unwrap();
funding_txid: Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(),
funding_output_index: 255,
signature: sig_1,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
};
let encoded_value = funding_created.encode();
let target_value = hex::decode("02020202020202020202020202020202020202020202020202020202020202026e96fe9f8b0ddcd729ba03cfafa5a27b050b39d354dd980814268dfa9a44d4c200ffd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
let funding_signed = msgs::FundingSigned {
channel_id: [2; 32],
signature: sig_1,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
};
let encoded_value = funding_signed.encode();
let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
channel_id: [2; 32],
signature: sig_1,
htlc_signatures: if htlcs { vec![sig_2, sig_3, sig_4] } else { Vec::new() },
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
};
let encoded_value = commitment_signed.encode();
let mut target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
channel_id: [2; 32],
per_commitment_secret: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
next_per_commitment_point: pubkey_1,
+ #[cfg(taproot)]
+ next_local_nonce: None,
};
let encoded_value = raa.encode();
let target_value = hex::decode("02020202020202020202020202020202020202020202020202020202020202020101010101010101010101010101010101010101010101010101010101010101031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap();
use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::keysinterface::{EntropySource, NodeSigner, Recipient};
+use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
use crate::ln::{PaymentHash, PaymentSecret};
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate};
use crate::ln::wire::Encode;
-use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
use crate::util::ser::{Writeable, Writer};
use crate::util::test_utils;
use crate::util::config::{UserConfig, ChannelConfig};
for f in pending_forwards.iter_mut() {
match f {
&mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) =>
- forward_info.outgoing_cltv_value += 1,
+ forward_info.outgoing_cltv_value -= 1,
_ => {},
}
}
}, true, Some(23), None, None);
}
+#[test]
+fn test_overshoot_final_cltv() {
+	// Tests that the final node accepts an HTLC whose CLTV expiry *overshoots*
+	// the value in the onion by one block: node 1's pending forward is tampered
+	// with to bump `outgoing_cltv_value` by 1, and node 2 must still see the
+	// payment as claimable and let it be claimed.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None; 3]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	// Two-hop route: 0 -> 1 -> 2.
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 40000);
+
+	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+
+	// Deliver the first-hop HTLC to node 1 and complete the commitment dance.
+	check_added_monitors!(nodes[0], 1);
+	let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let mut update_add_0 = update_0.update_add_htlcs[0].clone();
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_0);
+	commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
+
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	// Reach into node 1's pending-forwards map and bump the onion's outgoing
+	// CLTV by one block, producing the overshoot at the final hop.
+	for (_, pending_forwards) in nodes[1].node.forward_htlcs.lock().unwrap().iter_mut() {
+		for f in pending_forwards.iter_mut() {
+			match f {
+				&mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) =>
+					forward_info.outgoing_cltv_value += 1,
+				_ => {},
+			}
+		}
+	}
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	// Node 1 forwards the (tampered) HTLC on to node 2.
+	check_added_monitors!(&nodes[1], 1);
+	let update_1 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+	let mut update_add_1 = update_1.update_add_htlcs[0].clone();
+	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_1);
+	commitment_signed_dance!(nodes[2], nodes[1], update_1.commitment_signed, false, true);
+
+	// Despite the one-block overshoot, the final node accepts the HTLC and the
+	// payment can be claimed end-to-end.
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	expect_payment_claimable!(nodes[2], payment_hash, payment_secret, 40_000);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+}
+
fn do_test_onion_failure_stale_channel_update(announced_channel: bool) {
// Create a network of three nodes and two channels connecting them. We'll be updating the
// HTLC relay policy of the second channel, causing forwarding failures at the first hop.
&mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
forward_info: PendingHTLCInfo { ref mut outgoing_cltv_value, .. }, ..
}) => {
- *outgoing_cltv_value += 1;
+ *outgoing_cltv_value -= 1;
},
_ => panic!("Unexpected forward"),
}
commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false);
// Ensure the payment fails with the expected error.
- let expected_cltv: u32 = 82;
+ let expected_cltv: u32 = 80;
let error_data = expected_cltv.to_be_bytes().to_vec();
let mut fail_conditions = PaymentFailedConditions::new()
.blamed_scid(phantom_scid)
use bitcoin::secp256k1::{self, Secp256k1, SecretKey};
use crate::chain::keysinterface::{EntropySource, NodeSigner, Recipient};
+use crate::events;
use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channelmanager::{ChannelDetails, HTLCSource, IDEMPOTENCY_TIMEOUT_TICKS, PaymentId};
use crate::ln::onion_utils::HTLCFailReason;
use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router};
use crate::util::errors::APIError;
-use crate::util::events;
use crate::util::logger::Logger;
use crate::util::time::Time;
#[cfg(all(not(feature = "no-std"), test))]
}
fn is_auto_retryable_now(&self) -> bool {
match self {
- PendingOutboundPayment::Retryable { retry_strategy: Some(strategy), attempts, .. } => {
+ PendingOutboundPayment::Retryable {
+ retry_strategy: Some(strategy), attempts, payment_params: Some(_), ..
+ } => {
strategy.is_retryable_now(&attempts)
},
_ => false,
_ => false,
}
}
- fn payment_parameters(&mut self) -> Option<&mut PaymentParameters> {
- match self {
- PendingOutboundPayment::Retryable { payment_params: Some(ref mut params), .. } => {
- Some(params)
- },
- _ => None,
- }
- }
pub fn insert_previously_failed_scid(&mut self, scid: u64) {
if let PendingOutboundPayment::Retryable { payment_params: Some(params), .. } = self {
params.previously_failed_channels.push(scid);
/// may be surfaced later via [`Event::PaymentPathFailed`] and [`Event::PaymentFailed`].
///
/// [`ChannelManager::send_payment_with_retry`]: crate::ln::channelmanager::ChannelManager::send_payment_with_retry
-/// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
-/// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+/// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
+/// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
#[derive(Clone, Debug)]
pub enum RetryableSendFailure {
/// The provided [`PaymentParameters::expiry_time`] indicated that the payment has expired. Note
/// yet completed (i.e. generated an [`Event::PaymentSent`] or [`Event::PaymentFailed`]).
///
/// [`PaymentId`]: crate::ln::channelmanager::PaymentId
- /// [`Event::PaymentSent`]: crate::util::events::Event::PaymentSent
- /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ /// [`Event::PaymentSent`]: crate::events::Event::PaymentSent
+ /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
DuplicatePayment,
}
/// Because the payment failed outright, no payment tracking is done and no
/// [`Event::PaymentPathFailed`] or [`Event::PaymentFailed`] events will be generated.
///
- /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
- /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
ParameterError(APIError),
/// A parameter in a single path which was passed to send_payment was invalid, preventing us
/// from attempting to send the payment at all.
/// The results here are ordered the same as the paths in the route object which was passed to
/// send_payment.
///
- /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
- /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
PathParameterError(Vec<Result<(), APIError>>),
/// All paths which were attempted failed to send, with no channel state change taking place.
/// You can freely resend the payment in full (though you probably want to do so over different
/// Because the payment failed outright, no payment tracking is done and no
/// [`Event::PaymentPathFailed`] or [`Event::PaymentFailed`] events will be generated.
///
- /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
- /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
AllFailedResendSafe(Vec<APIError>),
/// Indicates that a payment for the provided [`PaymentId`] is already in-flight and has not
/// yet completed (i.e. generated an [`Event::PaymentSent`] or [`Event::PaymentFailed`]).
///
/// [`PaymentId`]: crate::ln::channelmanager::PaymentId
- /// [`Event::PaymentSent`]: crate::util::events::Event::PaymentSent
- /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ /// [`Event::PaymentSent`]: crate::events::Event::PaymentSent
+ /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
DuplicatePayment,
/// Some paths that were attempted failed to send, though some paths may have succeeded. At least
/// some paths have irrevocably committed to the HTLC.
NS::Target: NodeSigner,
L::Target: Logger,
IH: Fn() -> InFlightHtlcs,
- SP: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
{
self.send_payment_internal(payment_id, payment_hash, payment_secret, None, retry_strategy,
route_params, router, first_hops, &compute_inflight_htlcs, entropy_source, node_signer,
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret, payment_id, None, route, None, None, entropy_source, best_block_height)?;
self.pay_route_internal(route, payment_hash, payment_secret, None, payment_id, None,
NS::Target: NodeSigner,
L::Target: Logger,
IH: Fn() -> InFlightHtlcs,
- SP: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
{
let preimage = payment_preimage
.unwrap_or_else(|| PaymentPreimage(entropy_source.get_secure_random_bytes()));
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
let preimage = payment_preimage
.unwrap_or_else(|| PaymentPreimage(entropy_source.get_secure_random_bytes()));
R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
- SP: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>,
IH: Fn() -> InFlightHtlcs,
FH: Fn() -> Vec<ChannelDetails>,
L::Target: Logger,
}));
break
}
- }
+ } else { debug_assert!(false); }
}
}
core::mem::drop(outbounds);
/// Errors immediately on [`RetryableSendFailure`] error conditions. Otherwise, further errors may
/// be surfaced asynchronously via [`Event::PaymentPathFailed`] and [`Event::PaymentFailed`].
///
- /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
- /// [`Event::PaymentFailed`]: crate::util::events::Event::PaymentFailed
+ /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
+ /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
fn send_payment_internal<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
&self, payment_id: PaymentId, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>,
keysend_preimage: Option<PaymentPreimage>, retry_strategy: Retry, route_params: RouteParameters,
NS::Target: NodeSigner,
L::Target: Logger,
IH: Fn() -> InFlightHtlcs,
- SP: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
#[cfg(feature = "std")] {
if has_expired(&route_params) {
NS::Target: NodeSigner,
L::Target: Logger,
IH: Fn() -> InFlightHtlcs,
- SP: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
#[cfg(feature = "std")] {
if has_expired(&route_params) {
NS::Target: NodeSigner,
L::Target: Logger,
IH: Fn() -> InFlightHtlcs,
- SP: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ SP: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
match err {
PaymentSendFailure::AllFailedResendSafe(errs) => {
failure: events::PathFailure::InitialSend { err: e },
path,
short_channel_id: failed_scid,
- retry: None,
#[cfg(test)]
error_code: None,
#[cfg(test)]
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
let payment_id = PaymentId(entropy_source.get_secure_random_bytes());
) -> Result<(), PaymentSendFailure>
where
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
if route.paths.len() < 1 {
return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over".to_owned()}));
return Err(PaymentSendFailure::PathParameterError(path_errs));
}
if let Some(amt_msat) = recv_value_msat {
- debug_assert!(amt_msat >= total_value);
total_value = amt_msat;
}
let mut results = Vec::new();
debug_assert_eq!(route.paths.len(), onion_session_privs.len());
for (path, session_priv) in route.paths.iter().zip(onion_session_privs.into_iter()) {
- let mut path_res = send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv);
+ let mut path_res = send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv);
match path_res {
Ok(_) => {},
Err(APIError::MonitorUpdateInProgress) => {
) -> Result<(), PaymentSendFailure>
where
NS::Target: NodeSigner,
- F: Fn(&Vec<RouteHop>, &Option<PaymentParameters>, &PaymentHash, &Option<PaymentSecret>, u64,
- u32, PaymentId, &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
+ F: Fn(&Vec<RouteHop>, &PaymentHash, &Option<PaymentSecret>, u64, u32, PaymentId,
+ &Option<PaymentPreimage>, [u8; 32]) -> Result<(), APIError>
{
self.pay_route_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id,
recv_value_msat, onion_session_privs, node_signer, best_block_height,
pub(super) fn fail_htlc<L: Deref>(
&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
path: &Vec<RouteHop>, session_priv: &SecretKey, payment_id: &PaymentId,
- payment_params: &Option<PaymentParameters>, probing_cookie_secret: [u8; 32],
- secp_ctx: &Secp256k1<secp256k1::All>, pending_events: &Mutex<Vec<events::Event>>, logger: &L
+ probing_cookie_secret: [u8; 32], secp_ctx: &Secp256k1<secp256k1::All>,
+ pending_events: &Mutex<Vec<events::Event>>, logger: &L
) -> bool where L::Target: Logger {
#[cfg(test)]
let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_error.decode_onion_failure(secp_ctx, logger, &source);
let mut full_failure_ev = None;
let mut pending_retry_ev = false;
- let mut retry = None;
let attempts_remaining = if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) {
if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
log_trace!(logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
}
let mut is_retryable_now = payment.get().is_auto_retryable_now();
if let Some(scid) = short_channel_id {
+ // TODO: If we decided to blame ourselves (or one of our channels) in
+ // process_onion_failure we should close that channel as it implies our
+ // next-hop is needlessly blaming us!
payment.get_mut().insert_previously_failed_scid(scid);
}
- // We want to move towards only using the `PaymentParameters` in the outbound payments
- // map. However, for backwards-compatibility, we still need to support passing the
- // `PaymentParameters` data that was shoved in the HTLC (and given to us via
- // `payment_params`) back to the user.
- let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
- if let Some(params) = payment.get_mut().payment_parameters() {
- retry = Some(RouteParameters {
- payment_params: params.clone(),
- final_value_msat: path_last_hop.fee_msat,
- });
- } else if let Some(params) = payment_params {
- retry = Some(RouteParameters {
- payment_params: params.clone(),
- final_value_msat: path_last_hop.fee_msat,
- });
- }
-
- if payment_is_probe || !is_retryable_now || !payment_retryable || retry.is_none() {
+ if payment_is_probe || !is_retryable_now || !payment_retryable {
let _ = payment.get_mut().mark_abandoned(); // we'll only Err if it's a legacy payment
is_retryable_now = false;
}
}
}
} else {
- // TODO: If we decided to blame ourselves (or one of our channels) in
- // process_onion_failure we should close that channel as it implies our
- // next-hop is needlessly blaming us!
- if let Some(scid) = short_channel_id {
- retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
- }
// If we miss abandoning the payment above, we *must* generate an event here or else the
// payment will sit in our outbounds forever.
if attempts_remaining && !already_awaiting_retry {
failure: events::PathFailure::OnPath { network_update },
path: path.clone(),
short_channel_id,
- retry,
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+ use crate::events::{Event, PathFailure};
use crate::ln::PaymentHash;
use crate::ln::channelmanager::PaymentId;
use crate::ln::features::{ChannelFeatures, NodeFeatures};
use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters};
use crate::sync::{Arc, Mutex};
use crate::util::errors::APIError;
- use crate::util::events::{Event, PathFailure};
use crate::util::test_utils;
#[test]
outbound_payments.retry_payment_internal(
PaymentHash([0; 32]), PaymentId([0; 32]), expired_route_params, &&router, vec![],
&|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
- &pending_events, &|_, _, _, _, _, _, _, _, _| Ok(()));
+ &pending_events, &|_, _, _, _, _, _, _, _| Ok(()));
let events = pending_events.lock().unwrap();
assert_eq!(events.len(), 1);
if let Event::PaymentFailed { .. } = events[0] { } else { panic!("Unexpected event"); }
let err = outbound_payments.send_payment(
PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), expired_route_params,
&&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
- &pending_events, |_, _, _, _, _, _, _, _, _| Ok(())).unwrap_err();
+ &pending_events, |_, _, _, _, _, _, _, _| Ok(())).unwrap_err();
if let RetryableSendFailure::PaymentExpired = err { } else { panic!("Unexpected error"); }
}
}
outbound_payments.retry_payment_internal(
PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![],
&|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
- &pending_events, &|_, _, _, _, _, _, _, _, _| Ok(()));
+ &pending_events, &|_, _, _, _, _, _, _, _| Ok(()));
let events = pending_events.lock().unwrap();
assert_eq!(events.len(), 1);
if let Event::PaymentFailed { .. } = events[0] { } else { panic!("Unexpected event"); }
let err = outbound_payments.send_payment(
PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), route_params,
&&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
- &pending_events, |_, _, _, _, _, _, _, _, _| Ok(())).unwrap_err();
+ &pending_events, |_, _, _, _, _, _, _, _| Ok(())).unwrap_err();
if let RetryableSendFailure::RouteNotFound = err {
} else { panic!("Unexpected error"); }
}
PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(),
&&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
&pending_events,
- |_, _, _, _, _, _, _, _, _| Err(APIError::ChannelUnavailable { err: "test".to_owned() }))
+ |_, _, _, _, _, _, _, _| Err(APIError::ChannelUnavailable { err: "test".to_owned() }))
.unwrap();
let mut events = pending_events.lock().unwrap();
assert_eq!(events.len(), 2);
outbound_payments.send_payment(
PaymentHash([0; 32]), &None, PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(),
&&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
- &pending_events, |_, _, _, _, _, _, _, _, _| Err(APIError::MonitorUpdateInProgress))
+ &pending_events, |_, _, _, _, _, _, _, _| Err(APIError::MonitorUpdateInProgress))
.unwrap();
{
let events = pending_events.lock().unwrap();
PaymentHash([0; 32]), &None, PaymentId([1; 32]), Retry::Attempts(0), route_params.clone(),
&&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger,
&pending_events,
- |_, _, _, _, _, _, _, _, _| Err(APIError::APIMisuseError { err: "test".to_owned() }))
+ |_, _, _, _, _, _, _, _| Err(APIError::APIMisuseError { err: "test".to_owned() }))
.unwrap();
let events = pending_events.lock().unwrap();
assert_eq!(events.len(), 2);
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::keysinterface::EntropySource;
use crate::chain::transaction::OutPoint;
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
use crate::ln::features::InvoiceFeatures;
use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
use crate::routing::scoring::ChannelUsage;
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
use crate::util::test_utils;
use crate::util::errors::APIError;
use crate::util::ser::Writeable;
+use crate::util::string::UntrustedString;
use bitcoin::{Block, BlockHeader, TxMerkleNode};
use bitcoin::hashes::Hash;
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
- let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(as_broadcasted_txn.len(), 1);
- assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+ nodes[0].node.timer_tick_occurred();
+ if !confirm_before_reload {
+ let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_broadcasted_txn.len(), 1);
+ assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+ } else {
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ }
+ check_added_monitors!(nodes[0], 1);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[1].node.get_our_node_id());
nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
- check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
check_added_monitors!(nodes[1], 1);
assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
},
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+ nodes[0].node.timer_tick_occurred();
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[0].node.has_pending_payments());
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+ check_added_monitors!(nodes[0], 1);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[1].node.get_our_node_id());
nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
- check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
check_added_monitors!(nodes[1], 1);
bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
},
let mut events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events.drain(..).next().unwrap() {
- crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+ crate::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
assert_eq!(payment_id, ev_pid);
assert_eq!(payment_hash, ev_ph);
},
let mut events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events.drain(..).next().unwrap() {
- crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+ crate::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
assert_eq!(payment_id, ev_pid);
assert_eq!(payment_hash, ev_ph);
},
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
let (intercept_id, expected_outbound_amount_msat) = match events[0] {
- crate::util::events::Event::HTLCIntercepted {
+ crate::events::Event::HTLCIntercepted {
intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id
} => {
assert_eq!(pmt_hash, payment_hash);
if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
// the double-claim that would otherwise appear at the end of this test.
+ nodes[0].node.timer_tick_occurred();
let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_broadcasted_txn.len(), 1);
use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
use crate::chain::keysinterface::{KeysManager, NodeSigner, Recipient};
+use crate::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
use crate::onion_message::{CustomOnionMessageContents, CustomOnionMessageHandler, SimpleArcOnionMessenger, SimpleRefOnionMessenger};
use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId};
use crate::util::atomic_counter::AtomicCounter;
-use crate::util::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
use crate::util::logger::Logger;
use crate::prelude::*;
/// to a remote host. You will need to be able to generate multiple of these which meet Eq and
/// implement Hash to meet the PeerManager API.
///
-/// For efficiency, Clone should be relatively cheap for this type.
+/// For efficiency, [`Clone`] should be relatively cheap for this type.
///
/// Two descriptors may compare equal (by [`cmp::Eq`] and [`hash::Hash`]) as long as the original
/// has been disconnected, the [`PeerManager`] has been informed of the disconnection (either by it
/// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
/// issues such as overly long function definitions.
///
-/// (C-not exported) as `Arc`s don't make sense in bindings.
+/// This is not exported to bindings users as `Arc`s don't make sense in bindings.
pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, Arc<SimpleArcOnionMessenger<L>>, Arc<L>, IgnoringMessageHandler, Arc<KeysManager>>;
/// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// helps with issues such as long function definitions.
///
-/// (C-not exported) as general type aliases don't make sense in bindings.
+/// This is not exported to bindings users as general type aliases don't make sense in bindings.
pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k, 'l, 'm, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'm, M, T, F, L>, &'f P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, &'i SimpleRefOnionMessenger<'j, 'k, L>, &'f L, IgnoringMessageHandler, &'c KeysManager>;
/// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
/// [`PeerManager`] functions related to the same connection must occur only in serial, making new
/// calls only after previous ones have returned.
///
-/// Rather than using a plain PeerManager, it is preferable to use either a SimpleArcPeerManager
-/// a SimpleRefPeerManager, for conciseness. See their documentation for more details, but
-/// essentially you should default to using a SimpleRefPeerManager, and use a
-/// SimpleArcPeerManager when you require a PeerManager with a static lifetime, such as when
+/// Rather than using a plain [`PeerManager`], it is preferable to use either a [`SimpleArcPeerManager`]
+/// a [`SimpleRefPeerManager`], for conciseness. See their documentation for more details, but
+/// essentially you should default to using a [`SimpleRefPeerManager`], and use a
+/// [`SimpleArcPeerManager`] when you require a `PeerManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
///
/// [`read_event`]: PeerManager::read_event
/// `OnionMessageHandler`. No routing message handler is used and network graph messages are
/// ignored.
///
- /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
+ /// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
/// `current_time` is used as an always-increasing counter that survives across restarts and is
/// timestamp, however if it is not available a persistent counter that increases once per
/// minute should suffice.
///
- /// (C-not exported) as we can't export a PeerManager with a dummy route handler
+ /// This is not exported to bindings users as we can't export a PeerManager with a dummy route handler
pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
Self::new(MessageHandler {
chan_handler: channel_message_handler,
/// timestamp, however if it is not available a persistent counter that increases once per
/// minute should suffice.
///
- /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
+ /// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
- /// (C-not exported) as we can't export a PeerManager with a dummy channel handler
+ /// This is not exported to bindings users as we can't export a PeerManager with a dummy channel handler
pub fn new_routing_only(routing_message_handler: RM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
Self::new(MessageHandler {
chan_handler: ErroringMessageHandler::new(),
CMH::Target: CustomMessageHandler,
NS::Target: NodeSigner
{
- /// Constructs a new PeerManager with the given message handlers and node_id secret key
- /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
+ /// Constructs a new `PeerManager` with the given message handlers.
+ ///
+ /// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
/// `current_time` is used as an always-increasing counter that survives across restarts and is
/// Returns a small number of bytes to send to the remote node (currently always 50).
///
/// Panics if descriptor is duplicative with some other descriptor which has not yet been
- /// [`socket_disconnected()`].
+ /// [`socket_disconnected`].
///
- /// [`socket_disconnected()`]: PeerManager::socket_disconnected
+ /// [`socket_disconnected`]: PeerManager::socket_disconnected
pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec();
/// the connection immediately.
///
/// Panics if descriptor is duplicative with some other descriptor which has not yet been
- /// [`socket_disconnected()`].
+ /// [`socket_disconnected`].
///
- /// [`socket_disconnected()`]: PeerManager::socket_disconnected
+ /// [`socket_disconnected`]: PeerManager::socket_disconnected
pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.node_signer);
let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
/// May call [`send_data`] on the descriptor passed in (or an equal descriptor) before
/// returning. Thus, be very careful with reentrancy issues! The invariants around calling
/// [`write_buffer_space_avail`] in case a write did not fully complete must still hold - be
- /// ready to call `[write_buffer_space_avail`] again if a write call generated here isn't
+ /// ready to call [`write_buffer_space_avail`] again if a write call generated here isn't
/// sufficient!
///
/// [`send_data`]: SocketDescriptor::send_data
#[cfg(test)]
mod tests {
use crate::chain::keysinterface::{NodeSigner, Recipient};
+ use crate::events;
use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
use crate::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
use crate::ln::{msgs, wire};
use crate::ln::msgs::NetAddress;
- use crate::util::events;
use crate::util::test_utils;
use bitcoin::secp256k1::SecretKey;
if peers[0].read_event(&mut fd_a, &b_data).is_err() { break; }
cfgs[0].chan_handler.pending_events.lock().unwrap()
- .push(crate::util::events::MessageSendEvent::SendShutdown {
+ .push(crate::events::MessageSendEvent::SendShutdown {
node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(),
msg: msgs::Shutdown {
channel_id: [0; 32],
},
});
cfgs[1].chan_handler.pending_events.lock().unwrap()
- .push(crate::util::events::MessageSendEvent::SendShutdown {
+ .push(crate::events::MessageSendEvent::SendShutdown {
node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(),
msg: msgs::Shutdown {
channel_id: [0; 32],
use crate::chain::ChannelMonitorUpdateStatus;
use crate::chain::keysinterface::NodeSigner;
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{ChannelManager, MIN_CLTV_EXPIRY_DELTA, PaymentId};
use crate::routing::gossip::RoutingFees;
use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
use crate::ln::wire::Encode;
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::config::UserConfig;
use crate::util::ser::Writeable;
use crate::util::test_utils;
check_added_monitors!(nodes[2], 1);
let cs_funding_signed = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[2], &nodes[1].node.get_our_node_id());
+
nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &cs_funding_signed);
+ expect_channel_pending_event(&nodes[1], &nodes[2].node.get_our_node_id());
check_added_monitors!(nodes[1], 1);
let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, true, true);
- nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Refusing to forward over real channel SCID as our counterparty requested").unwrap(), 1);
+ nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", regex::Regex::new(r"Refusing to forward over real channel SCID as our counterparty requested").unwrap(), 1);
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
let channel_id = funding_output.to_channel_id();
nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(bs_signed_locked.len(), 2);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].chain_monitor.complete_sole_pending_chan_update(&channel_id);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ crate::events::Event::ChannelPending { ref counterparty_node_id, .. } => {
+ assert_eq!(nodes[1].node.get_our_node_id(), *counterparty_node_id);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ crate::events::Event::ChannelReady { ref counterparty_node_id, .. } => {
+ assert_eq!(nodes[1].node.get_our_node_id(), *counterparty_node_id);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
let as_locked_update = nodes[0].node.get_and_clear_pending_msg_events();
// Note that the funding transaction is actually released when
}
_ => panic!("Unexpected event"),
}
- expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
let bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
use crate::chain::channelmonitor::ChannelMonitor;
use crate::chain::keysinterface::EntropySource;
use crate::chain::transaction::OutPoint;
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::test_utils;
use crate::util::errors::APIError;
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::config::UserConfig;
+use crate::util::string::UntrustedString;
use bitcoin::hash_types::BlockHash;
}
// Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
+ expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id());
+ expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id());
+
nodes.push(node_a);
nodes.push(node_b);
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- { // Channel close should result in a commitment tx
- let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(txn.len(), 1);
- check_spends!(txn[0], funding_tx);
- assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
- }
-
for monitor in node_0_monitors.drain(..) {
assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
+
check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+ { // Channel close should result in a commitment tx
+ nodes[0].node.timer_tick_occurred();
+ let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+ }
+ check_added_monitors!(nodes[0], 1);
// nodes[1] and nodes[2] have no lost state with nodes[0]...
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
assert!(nodes[1].node.list_usable_channels().is_empty());
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
check_closed_broadcast!(nodes[1], false);
}
});
}
+ nodes[1].node.timer_tick_occurred();
let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_commitment_tx.len(), 1);
+ check_added_monitors!(nodes[1], 1);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
use crate::chain::channelmonitor::ANTI_REORG_DELAY;
use crate::chain::transaction::OutPoint;
use crate::chain::Confirm;
+use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::ChannelManager;
use crate::ln::msgs::{ChannelMessageHandler, Init};
-use crate::util::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::test_utils;
use crate::util::ser::Writeable;
+use crate::util::string::UntrustedString;
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::script::Builder;
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
- if !reorg_after_reload {
- // If the channel is already closed when we reload the node, we'll broadcast a closing
- // transaction via the ChannelMonitor which is missing a corresponding channel.
- assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
- nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
- }
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
}
if reorg_after_reload {
nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
check_added_monitors!(nodes[0], 1);
let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
- check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Channel closed because of an exception: ".to_owned() + expected_err });
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) });
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
use crate::chain::keysinterface::{EntropySource, SignerProvider};
use crate::chain::transaction::OutPoint;
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId};
use crate::routing::router::{PaymentParameters, get_route};
use crate::ln::msgs;
use crate::ln::script::ShutdownScript;
use crate::util::test_utils;
use crate::util::test_utils::OnGetShutdownScriptpubkey;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::util::errors::APIError;
use crate::util::config::UserConfig;
+use crate::util::string::UntrustedString;
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
// closing_signed so we do it ourselves
check_closed_broadcast!(nodes[1], false);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
}
assert!(nodes[0].node.list_channels().is_empty());
impl<T> TestEq for T {}
-/// A Lightning message returned by [`read()`] when decoding bytes received over the wire. Each
+/// A Lightning message returned by [`read`] when decoding bytes received over the wire. Each
/// variant contains a message from [`msgs`] or otherwise the message type if unknown.
#[allow(missing_docs)]
#[derive(Debug)]
use bitcoin::secp256k1::{PublicKey, Secp256k1};
use crate::io;
+use crate::io_extras::read_to_end;
use crate::sync::Arc;
struct MessengerNode {
fn handle_custom_message(&self, _msg: Self::CustomMessage) {}
fn read_custom_message<R: io::Read>(&self, message_type: u64, buffer: &mut R) -> Result<Option<Self::CustomMessage>, DecodeError> where Self: Sized {
if message_type == CUSTOM_MESSAGE_TYPE {
- let mut buf = Vec::new();
- buffer.read_to_end(&mut buf)?;
+ let buf = read_to_end(buffer)?;
assert_eq!(buf, CUSTOM_MESSAGE_CONTENTS);
return Ok(Some(TestCustomMessage {}))
}
node.messenger.handle_onion_message(&prev_node.get_node_pk(), &onion_msg);
if idx == num_nodes - 1 {
node.logger.assert_log_contains(
- "lightning::onion_message::messenger".to_string(),
- format!("Received an onion message with path_id: {:02x?}", expected_path_id).to_string(), 1);
+ "lightning::onion_message::messenger",
+ &format!("Received an onion message with path_id: {:02x?}", expected_path_id), 1);
}
prev_node = node;
}
pass_along_path(&nodes, None);
// Make sure the last node successfully decoded the reply path.
nodes[3].logger.assert_log_contains(
- "lightning::onion_message::messenger".to_string(),
- format!("Received an onion message with path_id None and a reply_path").to_string(), 1);
+ "lightning::onion_message::messenger",
+ &format!("Received an onion message with path_id None and a reply_path"), 1);
// Destination::BlindedPath
let blinded_path = BlindedPath::new(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
nodes[0].messenger.send_onion_message(&[], Destination::BlindedPath(blinded_path), OnionMessageContents::Custom(test_msg), Some(reply_path)).unwrap();
pass_along_path(&nodes, None);
nodes[3].logger.assert_log_contains(
- "lightning::onion_message::messenger".to_string(),
- format!("Received an onion message with path_id None and a reply_path").to_string(), 2);
+ "lightning::onion_message::messenger",
+ &format!("Received an onion message with path_id None and a reply_path"), 2);
}
#[test]
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient};
+use crate::events::OnionMessageProvider;
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs::{self, OnionMessageHandler};
use crate::ln::onion_utils;
pub use super::packet::{CustomOnionMessageContents, OnionMessageContents};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
-use crate::util::events::OnionMessageProvider;
use crate::util::logger::Logger;
use crate::util::ser::Writeable;
/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
/// [`SimpleArcPeerManager`]. See their docs for more details.
///
-/// (C-not exported) as `Arc`s don't make sense in bindings.
+/// This is not exported to bindings users as `Arc`s don't make sense in bindings.
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
-/// (C-not exported) as general type aliases don't make sense in bindings.
+/// This is not exported to bindings users as general type aliases don't make sense in bindings.
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
impl<T: CustomOnionMessageContents> OnionMessageContents<T> {
/// Returns the type that was used to decode the message payload.
///
- /// (C-not exported) as methods on non-cloneable enums are not currently exportable
+ /// This is not exported to bindings users as methods on non-cloneable enums are not currently exportable
pub fn tlv_type(&self) -> u64 {
match self {
&OnionMessageContents::Custom(ref msg) => msg.tlv_type(),
}
}
-/// (C-not exported) as methods on non-cloneable enums are not currently exportable
+/// This is not exported to bindings users as methods on non-cloneable enums are not currently exportable
impl<T: CustomOnionMessageContents> Writeable for OnionMessageContents<T> {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
match self {
use bitcoin::network::constants::Network;
use bitcoin::blockdata::constants::genesis_block;
+use crate::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
use crate::routing::utxo::{self, UtxoLookup};
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable};
use crate::util::logger::{Logger, Level};
-use crate::util::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
use crate::util::string::PrintableString;
use crate::util::indexed_map::{IndexedMap, Entry as IndexedMapEntry};
where U::Target: UtxoLookup, L::Target: Logger
{
/// Creates a new tracker of the actual state of the network of channels and nodes,
- /// assuming an existing Network Graph.
+ /// assuming an existing [`NetworkGraph`].
/// UTXO lookup is used to make sure announced channels exist on-chain, channel data is
/// correct, and the announcement is signed with channel owners' keys.
pub fn new(network_graph: G, utxo_lookup: Option<U>, logger: L) -> Self {
/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
/// [`P2PGossipSync::new`].
///
- /// (C-not exported) as bindings don't support a reference-to-a-reference yet
+ /// This is not exported to bindings users as bindings don't support a reference-to-a-reference yet
pub fn network_graph(&self) -> &G {
&self.network_graph
}
impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
/// Handles any network updates originating from [`Event`]s.
///
- /// [`Event`]: crate::util::events::Event
+ /// [`Event`]: crate::events::Event
pub fn handle_network_update(&self, network_update: &NetworkUpdate) {
match *network_update {
NetworkUpdate::ChannelUpdateMessage { ref msg } => {
}
/// Initiates a stateless sync of routing gossip information with a peer
- /// using gossip_queries. The default strategy used by this implementation
+ /// using [`gossip_queries`]. The default strategy used by this implementation
/// is to sync the full block range with several peers.
///
- /// We should expect one or more reply_channel_range messages in response
- /// to our query_channel_range. Each reply will enqueue a query_scid message
+ /// We should expect one or more [`reply_channel_range`] messages in response
+ /// to our [`query_channel_range`]. Each reply will enqueue a [`query_scid`] message
/// to request gossip messages for each channel. The sync is considered complete
- /// when the final reply_scids_end message is received, though we are not
+ /// when the final [`reply_scids_end`] message is received, though we are not
/// tracking this directly.
+ ///
+ /// [`gossip_queries`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#query-messages
+ /// [`reply_channel_range`]: msgs::ReplyChannelRange
+ /// [`query_channel_range`]: msgs::QueryChannelRange
+ /// [`query_scid`]: msgs::QueryShortChannelIds
+ /// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd
fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> {
// We will only perform a sync with peers that support gossip_queries.
if !init_msg.features.supports_gossip_queries() {
/// May be invalid or malicious (eg control chars),
/// should not be exposed to the user.
pub alias: NodeAlias,
- /// Internet-level addresses via which one can connect to the node
- pub addresses: Vec<NetAddress>,
/// An initial announcement of the node
/// Mostly redundant with the data we store in fields explicitly.
/// Everything else is useful only for sending out for initial routing sync.
pub announcement_message: Option<NodeAnnouncement>
}
-impl_writeable_tlv_based!(NodeAnnouncementInfo, {
- (0, features, required),
- (2, last_update, required),
- (4, rgb, required),
- (6, alias, required),
- (8, announcement_message, option),
- (10, addresses, vec_type),
-});
+impl NodeAnnouncementInfo {
+ /// Internet-level addresses via which one can connect to the node
+ ///
+ /// Sourced from the cached `announcement_message` rather than a dedicated
+ /// field; returns an empty slice when no announcement message is stored
+ /// (e.g. when the announcement was not retained for relay).
+ pub fn addresses(&self) -> &[NetAddress] {
+ self.announcement_message.as_ref()
+ .map(|msg| msg.contents.addresses.as_slice())
+ .unwrap_or_default()
+ }
+}
+
+impl Writeable for NodeAnnouncementInfo {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ // Always write an empty addresses vec under the legacy even TLV type so
+ // that older readers (which require the field to be present) still parse
+ // this struct; the live address data is carried by `announcement_message`.
+ let empty_addresses = Vec::<NetAddress>::new();
+ write_tlv_fields!(writer, {
+ (0, self.features, required),
+ (2, self.last_update, required),
+ (4, self.rgb, required),
+ (6, self.alias, required),
+ (8, self.announcement_message, option),
+ (10, empty_addresses, vec_type), // Versions prior to 0.0.115 require this field
+ });
+ Ok(())
+ }
+}
+
+impl Readable for NodeAnnouncementInfo {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ _init_and_read_tlv_fields!(reader, {
+ (0, features, required),
+ (2, last_update, required),
+ (4, rgb, required),
+ (6, alias, required),
+ (8, announcement_message, option),
+ (10, _addresses, vec_type), // deprecated, not used anymore
+ });
+ // Drain (and discard) the legacy addresses TLV for compatibility with
+ // older serializations; addresses are now read from `announcement_message`.
+ let _: Option<Vec<NetAddress>> = _addresses;
+ Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(),
+ alias: alias.0.unwrap(), announcement_message })
+ }
+}
/// A user-defined name for a node, which may be used when displaying the node in a graph.
///
}
}
-// A wrapper allowing for the optional deseralization of `NodeAnnouncementInfo`. Utilizing this is
+// A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is
// necessary to maintain compatibility with previous serializations of `NetAddress` that have an
// invalid hostname set. We ignore and eat all errors until we are either able to read a
// `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end.
last_update: msg.timestamp,
rgb: msg.rgb,
alias: NodeAlias(msg.alias),
- addresses: msg.addresses.clone(),
announcement_message: if should_relay { full_msg.cloned() } else { None },
});
impl ReadOnlyNetworkGraph<'_> {
/// Returns all known valid channels' short ids along with announced channel info.
///
- /// (C-not exported) because we don't want to return lifetime'd references
+ /// This is not exported to bindings users because we don't want to return lifetime'd references
pub fn channels(&self) -> &IndexedMap<u64, ChannelInfo> {
&*self.channels
}
/// Returns all known nodes' public keys along with announced node info.
///
- /// (C-not exported) because we don't want to return lifetime'd references
+ /// This is not exported to bindings users because we don't want to return lifetime'd references
pub fn nodes(&self) -> &IndexedMap<NodeId, NodeInfo> {
&*self.nodes
}
/// Returns None if the requested node is completely unknown,
/// or if node announcement for the node was never received.
pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<NetAddress>> {
- if let Some(node) = self.nodes.get(&NodeId::from_pubkey(&pubkey)) {
- if let Some(node_info) = node.announcement_info.as_ref() {
- return Some(node_info.addresses.clone())
- }
- }
- None
+ // Addresses are derived via `NodeAnnouncementInfo::addresses` (i.e. from the
+ // cached announcement message), so the returned Vec may be empty even for a
+ // known node if no announcement message was retained.
+ self.nodes.get(&NodeId::from_pubkey(&pubkey))
+ .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec()))
}
}
#[cfg(test)]
pub(crate) mod tests {
+ use crate::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager;
use crate::ln::chan_utils::make_funding_redeemscript;
#[cfg(feature = "std")]
ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
use crate::util::config::UserConfig;
use crate::util::test_utils;
- use crate::util::ser::{ReadableArgs, Writeable};
- use crate::util::events::{MessageSendEvent, MessageSendEventsProvider};
+ use crate::util::ser::{ReadableArgs, Readable, Writeable};
use crate::util::scid_utils::scid_from_parts;
use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS;
#[test]
fn node_info_is_readable() {
- use std::convert::TryFrom;
-
// 1. Check we can read a valid NodeAnnouncementInfo and fail on an invalid one
- let valid_netaddr = crate::ln::msgs::NetAddress::Hostname { hostname: crate::util::ser::Hostname::try_from("A".to_string()).unwrap(), port: 1234 };
+ let announcement_message = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap();
+ let announcement_message = NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap();
let valid_node_ann_info = NodeAnnouncementInfo {
features: channelmanager::provided_node_features(&UserConfig::default()),
last_update: 0,
rgb: [0u8; 3],
alias: NodeAlias([0u8; 32]),
- addresses: vec![valid_netaddr],
- announcement_message: None,
+ announcement_message: Some(announcement_message)
};
let mut encoded_valid_node_ann_info = Vec::new();
assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok());
- let read_valid_node_ann_info: NodeAnnouncementInfo = crate::util::ser::Readable::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
+ let read_valid_node_ann_info = NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
assert_eq!(read_valid_node_ann_info, valid_node_ann_info);
+ assert_eq!(read_valid_node_ann_info.addresses().len(), 1);
let encoded_invalid_node_ann_info = hex::decode("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
- let read_invalid_node_ann_info_res: Result<NodeAnnouncementInfo, crate::ln::msgs::DecodeError> = crate::util::ser::Readable::read(&mut encoded_invalid_node_ann_info.as_slice());
+ let read_invalid_node_ann_info_res = NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice());
assert!(read_invalid_node_ann_info_res.is_err());
// 2. Check we can read a NodeInfo anyways, but set the NodeAnnouncementInfo to None if invalid
let mut encoded_valid_node_info = Vec::new();
assert!(valid_node_info.write(&mut encoded_valid_node_info).is_ok());
- let read_valid_node_info: NodeInfo = crate::util::ser::Readable::read(&mut encoded_valid_node_info.as_slice()).unwrap();
+ let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap();
assert_eq!(read_valid_node_info, valid_node_info);
let encoded_invalid_node_info_hex = hex::decode("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
- let read_invalid_node_info: NodeInfo = crate::util::ser::Readable::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
+ let read_invalid_node_info = NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
assert_eq!(read_invalid_node_info.announcement_info, None);
}
+
+ #[test]
+ fn test_node_info_keeps_compatibility() {
+ let old_ann_info_with_addresses = hex::decode("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap();
+ let ann_info_with_addresses = NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice())
+ .expect("to be able to read an old NodeAnnouncementInfo with addresses");
+ // This serialized info has an address field but no announcement_message, therefore the addresses returned by our function will still be empty
+ assert!(ann_info_with_addresses.addresses().is_empty());
+ }
}
#[cfg(all(test, feature = "_bench_unstable"))]
/// This is used by `ChannelManager` to track information which may be required for retries,
/// provided back to you via [`Event::PaymentPathFailed`].
///
- /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+ /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
pub payment_params: Option<PaymentParameters>,
}
/// Passed to [`find_route`] and [`build_route_from_hops`], but also provided in
/// [`Event::PaymentPathFailed`].
///
-/// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+/// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RouteParameters {
/// The parameters of the failed payment path.
/// Includes the payee's features.
///
- /// (C-not exported) since bindings don't support move semantics
+ /// This is not exported to bindings users since bindings don't support move semantics
pub fn with_features(self, features: InvoiceFeatures) -> Self {
Self { features: Some(features), ..self }
}
/// Includes hints for routing to the payee.
///
- /// (C-not exported) since bindings don't support move semantics
+ /// This is not exported to bindings users since bindings don't support move semantics
pub fn with_route_hints(self, route_hints: Vec<RouteHint>) -> Self {
Self { route_hints, ..self }
}
/// Includes a payment expiration in seconds relative to the UNIX epoch.
///
- /// (C-not exported) since bindings don't support move semantics
+ /// This is not exported to bindings users since bindings don't support move semantics
pub fn with_expiry_time(self, expiry_time: u64) -> Self {
Self { expiry_time: Some(expiry_time), ..self }
}
/// Includes a limit for the total CLTV expiry delta which is considered during routing
///
- /// (C-not exported) since bindings don't support move semantics
+ /// This is not exported to bindings users since bindings don't support move semantics
pub fn with_max_total_cltv_expiry_delta(self, max_total_cltv_expiry_delta: u32) -> Self {
Self { max_total_cltv_expiry_delta, ..self }
}
/// Includes a limit for the maximum number of payment paths that may be used.
///
- /// (C-not exported) since bindings don't support move semantics
+ /// This is not exported to bindings users since bindings don't support move semantics
pub fn with_max_path_count(self, max_path_count: u8) -> Self {
Self { max_path_count, ..self }
}
/// Includes a limit for the maximum number of payment paths that may be used.
///
- /// (C-not exported) since bindings don't support move semantics
+ /// This is not exported to bindings users since bindings don't support move semantics
pub fn with_max_channel_saturation_power_of_half(self, max_channel_saturation_power_of_half: u8) -> Self {
Self { max_channel_saturation_power_of_half, ..self }
}
/// [`ChannelManager::list_usable_channels`] will never include such channels.
///
/// [`ChannelManager::list_usable_channels`]: crate::ln::channelmanager::ChannelManager::list_usable_channels
-/// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
+/// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
/// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
pub fn find_route<L: Deref, GL: Deref, S: Score>(
our_node_pubkey: &PublicKey, route_params: &RouteParameters,
inbound_htlc_minimum_msat: None,
inbound_htlc_maximum_msat: None,
config: None,
+ feerate_sat_per_1000_weight: None
}
}
inbound_htlc_minimum_msat: None,
inbound_htlc_maximum_msat: None,
config: None,
+ feerate_sat_per_1000_weight: None,
}
}
#[cfg(not(c_bindings))]
impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
-
-/// (C-not exported)
+/// This is not exported to bindings users
impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
type Locked = MutexGuard<'a, T>;
}
#[cfg(c_bindings)]
-/// (C-not exported)
+/// This is not exported to bindings users
impl<'a, T: Writeable> Writeable for RefMut<'a, T> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
T::write(&**self, writer)
}
#[cfg(c_bindings)]
-/// (C-not exported)
+/// This is not exported to bindings users
impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
S::write(&**self, writer)
/// Probabilistic [`Score`] implementation.
///
-/// (C-not exported) generally all users should use the [`ProbabilisticScorer`] type alias.
+/// This is not exported to bindings users; generally, all users should use the [`ProbabilisticScorer`] type alias.
pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
where L::Target: Logger {
params: ProbabilisticScoringParameters,
/// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
/// considered during path finding.
///
- /// (C-not exported)
+ /// This is not exported to bindings users
pub manual_node_penalties: HashMap<NodeId, u64>,
/// This penalty is applied when `htlc_maximum_msat` is equal to or larger than half of the
use bitcoin::{BlockHash, TxOut};
use bitcoin::hashes::hex::ToHex;
+use crate::events::MessageSendEvent;
use crate::ln::chan_utils::make_funding_redeemscript_from_slices;
use crate::ln::msgs::{self, LightningError, ErrorAction};
use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
-use crate::util::events::MessageSendEvent;
use crate::util::logger::{Level, Logger};
use crate::util::ser::Writeable;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;
+pub use std::sync::WaitTimeoutResult;
+
use crate::prelude::HashMap;
use super::{LockTestExt, LockHeldState};
Condvar { inner: StdCondvar::new() }
}
- pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
+ pub fn wait_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, condition: F)
+ -> LockResult<MutexGuard<'a, T>> {
let mutex: &'a Mutex<T> = guard.mutex;
- self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
+ self.inner.wait_while(guard.into_inner(), condition).map(|lock| MutexGuard { mutex, lock })
+ .map_err(|_| ())
}
#[allow(unused)]
- pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
+ pub fn wait_timeout_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F)
+ -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
let mutex = guard.mutex;
- self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
+ self.inner.wait_timeout_while(guard.into_inner(), dur, condition).map_err(|_| ())
+ .map(|(lock, e)| (MutexGuard { mutex, lock }, e))
}
pub fn notify_all(&self) { self.inner.notify_all(); }
_lockdep_trace: Backtrace,
}
+// Locates the frame preceding the earliest `debug_sync` frame in the call stack. This ensures we
+// can properly detect a lock's construction and acquisition callsites, since the latter may contain
+// multiple `debug_sync` frames.
#[cfg(feature = "backtrace")]
-fn get_construction_location(backtrace: &Backtrace) -> (String, Option<u32>) {
- // Find the first frame that is after `debug_sync` (or that is in our tests) and use
- // that as the mutex construction site. Note that the first few frames may be in
- // the `backtrace` crate, so we have to ignore those.
+fn locate_call_symbol(backtrace: &Backtrace) -> (String, Option<u32>) {
+ // Find the earliest `debug_sync` frame (or a frame in our tests) and use the frame preceding it
+ // as the callsite. Note that the first few frames may be in the `backtrace` crate, so we have
+ // to ignore those.
let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
let mut found_debug_sync = false;
- for frame in backtrace.frames() {
- for symbol in frame.symbols() {
- let symbol_name = symbol.name().unwrap().as_str().unwrap();
- if !sync_mutex_constr_regex.is_match(symbol_name) {
- if found_debug_sync {
- return (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno());
- }
- } else { found_debug_sync = true; }
+ let mut symbol_after_latest_debug_sync = None;
+ for frame in backtrace.frames().iter() {
+ for symbol in frame.symbols().iter() {
+ if let Some(symbol_name) = symbol.name().map(|name| name.as_str()).flatten() {
+ if !sync_mutex_constr_regex.is_match(symbol_name) {
+ if found_debug_sync {
+ symbol_after_latest_debug_sync = Some(symbol);
+ found_debug_sync = false;
+ }
+ } else { found_debug_sync = true; }
+ }
}
}
- panic!("Couldn't find mutex construction callsite");
+ let symbol = symbol_after_latest_debug_sync.expect("Couldn't find lock call symbol");
+ (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno())
}
impl LockMetadata {
#[cfg(feature = "backtrace")]
{
let (lock_constr_location, lock_constr_colno) =
- get_construction_location(&res._lock_construction_bt);
+ locate_call_symbol(&res._lock_construction_bt);
LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
match locks.entry(lock_constr_location) {
hash_map::Entry::Occupied(e) => {
assert_eq!(lock_constr_colno,
- get_construction_location(&e.get()._lock_construction_bt).1,
+ locate_call_symbol(&e.get()._lock_construction_bt).1,
"Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
return Arc::clone(e.get())
},
fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
LOCKS_HELD.with(|held| {
- // For each lock which is currently locked, check that no lock's locked-before
- // set includes the lock we're about to lock, which would imply a lockorder
- // inversion.
+ // For each lock that is currently held, check that no lock's `locked_before` set
+ // includes the lock we're about to hold, which would imply a lockorder inversion.
for (locked_idx, _locked) in held.borrow().iter() {
if *locked_idx == this.lock_idx {
// Note that with `feature = "backtrace"` set, we may be looking at different
#[cfg(feature = "backtrace")]
debug_assert!(_double_lock_self_allowed,
"Tried to acquire a lock while it was held!\nLock constructed at {}",
- get_construction_location(&this._lock_construction_bt).0);
+ locate_call_symbol(&this._lock_construction_bt).0);
#[cfg(not(feature = "backtrace"))]
panic!("Tried to acquire a lock while it was held!");
}
}
for (_locked_idx, locked) in held.borrow().iter() {
for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
- if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
- #[cfg(feature = "backtrace")]
- panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
- get_construction_location(&this._lock_construction_bt).0,
- this.lock_idx, this._lock_construction_bt,
- get_construction_location(&locked._lock_construction_bt).0,
- locked.lock_idx, locked._lock_construction_bt,
- _locked_dep._lockdep_trace);
- #[cfg(not(feature = "backtrace"))]
- panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+ let is_dep_this_lock = *locked_dep_idx == this.lock_idx;
+ let has_same_construction = *locked_dep_idx == locked.lock_idx;
+ if is_dep_this_lock && !has_same_construction {
+ #[allow(unused_mut, unused_assignments)]
+ let mut has_same_callsite = false;
+ #[cfg(feature = "backtrace")] {
+ has_same_callsite = _double_lock_self_allowed &&
+ locate_call_symbol(&_locked_dep._lockdep_trace) ==
+ locate_call_symbol(&Backtrace::new());
+ }
+ if !has_same_callsite {
+ #[cfg(feature = "backtrace")]
+ panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
+ locate_call_symbol(&this._lock_construction_bt).0,
+ this.lock_idx, this._lock_construction_bt,
+ locate_call_symbol(&locked._lock_construction_bt).0,
+ locked.lock_idx, locked._lock_construction_bt,
+ _locked_dep._lockdep_trace);
+ #[cfg(not(feature = "backtrace"))]
+ panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+ }
}
}
// Insert any already-held locks in our locked-before set.
pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
-use core::time::Duration;
use core::cell::{RefCell, Ref, RefMut};
use super::{LockTestExt, LockHeldState};
pub type LockResult<Guard> = Result<Guard, ()>;
-pub struct Condvar {}
-
-impl Condvar {
- pub fn new() -> Condvar {
- Condvar { }
- }
-
- pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
- Ok(guard)
- }
-
- #[allow(unused)]
- pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, _dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
- Ok((guard, ()))
- }
-
- pub fn notify_all(&self) {}
-}
-
pub struct Mutex<T: ?Sized> {
inner: RefCell<T>
}
///
/// Default value: false.
///
- /// [`Event::OpenChannelRequest`]: crate::util::events::Event::OpenChannelRequest
+ /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest
/// [`msgs::OpenChannel`]: crate::ln::msgs::OpenChannel
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
pub manually_accept_inbound_channels: bool,
/// Default value: false.
///
/// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid
- /// [`Event::HTLCIntercepted`]: crate::util::events::Event::HTLCIntercepted
+ /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted
pub accept_intercept_htlcs: bool,
}
use bitcoin::secp256k1::{SecretKey, PublicKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
#[cfg(anchors)]
-use crate::util::events::HTLCDescriptor;
+use crate::events::bump_transaction::HTLCDescriptor;
use crate::util::ser::{Writeable, Writer};
use crate::io::Error;
+++ /dev/null
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-//! Events are returned from various bits in the library which indicate some action must be taken
-//! by the client.
-//!
-//! Because we don't have a built-in runtime, it's up to the client to call events at a time in the
-//! future, as well as generate and broadcast funding transactions handle payment preimages and a
-//! few other things.
-
-use crate::chain::keysinterface::SpendableOutputDescriptor;
-#[cfg(anchors)]
-use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment};
-use crate::ln::channelmanager::{InterceptId, PaymentId};
-use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
-use crate::ln::features::ChannelTypeFeatures;
-use crate::ln::msgs;
-use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-use crate::routing::gossip::NetworkUpdate;
-use crate::util::errors::APIError;
-use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength};
-use crate::routing::router::{RouteHop, RouteParameters};
-
-use bitcoin::{PackedLockTime, Transaction};
-#[cfg(anchors)]
-use bitcoin::{OutPoint, Txid, TxIn, TxOut, Witness};
-use bitcoin::blockdata::script::Script;
-use bitcoin::hashes::Hash;
-use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::PublicKey;
-#[cfg(anchors)]
-use bitcoin::secp256k1::{self, Secp256k1};
-#[cfg(anchors)]
-use bitcoin::secp256k1::ecdsa::Signature;
-use crate::io;
-use crate::prelude::*;
-use core::time::Duration;
-use core::ops::Deref;
-use crate::sync::Arc;
-
-/// Some information provided on receipt of payment depends on whether the payment received is a
-/// spontaneous payment or a "conventional" lightning payment that's paying an invoice.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum PaymentPurpose {
- /// Information for receiving a payment that we generated an invoice for.
- InvoicePayment {
- /// The preimage to the payment_hash, if the payment hash (and secret) were fetched via
- /// [`ChannelManager::create_inbound_payment`]. If provided, this can be handed directly to
- /// [`ChannelManager::claim_funds`].
- ///
- /// [`ChannelManager::create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment
- /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
- payment_preimage: Option<PaymentPreimage>,
- /// The "payment secret". This authenticates the sender to the recipient, preventing a
- /// number of deanonymization attacks during the routing process.
- /// It is provided here for your reference, however its accuracy is enforced directly by
- /// [`ChannelManager`] using the values you previously provided to
- /// [`ChannelManager::create_inbound_payment`] or
- /// [`ChannelManager::create_inbound_payment_for_hash`].
- ///
- /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- /// [`ChannelManager::create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment
- /// [`ChannelManager::create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
- payment_secret: PaymentSecret,
- },
- /// Because this is a spontaneous payment, the payer generated their own preimage rather than us
- /// (the payee) providing a preimage.
- SpontaneousPayment(PaymentPreimage),
-}
-
-impl_writeable_tlv_based_enum!(PaymentPurpose,
- (0, InvoicePayment) => {
- (0, payment_preimage, option),
- (2, payment_secret, required),
- };
- (2, SpontaneousPayment)
-);
-
-/// When the payment path failure took place and extra details about it. [`PathFailure::OnPath`] may
-/// contain a [`NetworkUpdate`] that needs to be applied to the [`NetworkGraph`].
-///
-/// [`NetworkUpdate`]: crate::routing::gossip::NetworkUpdate
-/// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub enum PathFailure {
- /// We failed to initially send the payment and no HTLC was committed to. Contains the relevant
- /// error.
- InitialSend {
- /// The error surfaced from initial send.
- err: APIError,
- },
- /// A hop on the path failed to forward our payment.
- OnPath {
- /// If present, this [`NetworkUpdate`] should be applied to the [`NetworkGraph`] so that routing
- /// decisions can take into account the update.
- ///
- /// [`NetworkUpdate`]: crate::routing::gossip::NetworkUpdate
- /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
- network_update: Option<NetworkUpdate>,
- },
-}
-
-impl_writeable_tlv_based_enum_upgradable!(PathFailure,
- (0, OnPath) => {
- (0, network_update, upgradable_option),
- },
- (2, InitialSend) => {
- (0, err, upgradable_required),
- },
-);
-
-#[derive(Clone, Debug, PartialEq, Eq)]
-/// The reason the channel was closed. See individual variants more details.
-pub enum ClosureReason {
- /// Closure generated from receiving a peer error message.
- ///
- /// Our counterparty may have broadcasted their latest commitment state, and we have
- /// as well.
- CounterpartyForceClosed {
- /// The error which the peer sent us.
- ///
- /// The string should be sanitized before it is used (e.g emitted to logs
- /// or printed to stdout). Otherwise, a well crafted error message may exploit
- /// a security vulnerability in the terminal emulator or the logging subsystem.
- peer_msg: String,
- },
- /// Closure generated from [`ChannelManager::force_close_channel`], called by the user.
- ///
- /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel.
- HolderForceClosed,
- /// The channel was closed after negotiating a cooperative close and we've now broadcasted
- /// the cooperative close transaction. Note the shutdown may have been initiated by us.
- //TODO: split between CounterpartyInitiated/LocallyInitiated
- CooperativeClosure,
- /// A commitment transaction was confirmed on chain, closing the channel. Most likely this
- /// commitment transaction came from our counterparty, but it may also have come from
- /// a copy of our own `ChannelMonitor`.
- CommitmentTxConfirmed,
- /// The funding transaction failed to confirm in a timely manner on an inbound channel.
- FundingTimedOut,
- /// Closure generated from processing an event, likely a HTLC forward/relay/reception.
- ProcessingError {
- /// A developer-readable error message which we generated.
- err: String,
- },
- /// The peer disconnected prior to funding completing. In this case the spec mandates that we
- /// forget the channel entirely - we can attempt again if the peer reconnects.
- ///
- /// This includes cases where we restarted prior to funding completion, including prior to the
- /// initial [`ChannelMonitor`] persistence completing.
- ///
- /// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
- /// peer because of mutual incompatibility between us and our channel counterparty.
- ///
- /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
- DisconnectedPeer,
- /// Closure generated from `ChannelManager::read` if the [`ChannelMonitor`] is newer than
- /// the [`ChannelManager`] deserialized.
- ///
- /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
- /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- OutdatedChannelManager
-}
-
-impl core::fmt::Display for ClosureReason {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
- f.write_str("Channel closed because ")?;
- match self {
- ClosureReason::CounterpartyForceClosed { peer_msg } => {
- f.write_str("counterparty force-closed with message ")?;
- f.write_str(&peer_msg)
- },
- ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
- ClosureReason::CooperativeClosure => f.write_str("the channel was cooperatively closed"),
- ClosureReason::CommitmentTxConfirmed => f.write_str("commitment or closing transaction was confirmed on chain."),
- ClosureReason::FundingTimedOut => write!(f, "funding transaction failed to confirm within {} blocks", FUNDING_CONF_DEADLINE_BLOCKS),
- ClosureReason::ProcessingError { err } => {
- f.write_str("of an exception: ")?;
- f.write_str(&err)
- },
- ClosureReason::DisconnectedPeer => f.write_str("the peer disconnected prior to the channel being funded"),
- ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
- }
- }
-}
-
-impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
- (0, CounterpartyForceClosed) => { (1, peer_msg, required) },
- (1, FundingTimedOut) => {},
- (2, HolderForceClosed) => {},
- (6, CommitmentTxConfirmed) => {},
- (4, CooperativeClosure) => {},
- (8, ProcessingError) => { (1, err, required) },
- (10, DisconnectedPeer) => {},
- (12, OutdatedChannelManager) => {},
-);
-
-/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum HTLCDestination {
- /// We tried forwarding to a channel but failed to do so. An example of such an instance is when
- /// there is insufficient capacity in our outbound channel.
- NextHopChannel {
- /// The `node_id` of the next node. For backwards compatibility, this field is
- /// marked as optional, versions prior to 0.0.110 may not always be able to provide
- /// counterparty node information.
- node_id: Option<PublicKey>,
- /// The outgoing `channel_id` between us and the next node.
- channel_id: [u8; 32],
- },
- /// Scenario where we are unsure of the next node to forward the HTLC to.
- UnknownNextHop {
- /// Short channel id we are requesting to forward an HTLC to.
- requested_forward_scid: u64,
- },
- /// We couldn't forward to the outgoing scid. An example would be attempting to send a duplicate
- /// intercept HTLC.
- InvalidForward {
- /// Short channel id we are requesting to forward an HTLC to.
- requested_forward_scid: u64
- },
- /// Failure scenario where an HTLC may have been forwarded to be intended for us,
- /// but is invalid for some reason, so we reject it.
- ///
- /// Some of the reasons may include:
- /// * HTLC Timeouts
- /// * Expected MPP amount to claim does not equal HTLC total
- /// * Claimable amount does not match expected amount
- FailedPayment {
- /// The payment hash of the payment we attempted to process.
- payment_hash: PaymentHash
- },
-}
-
-impl_writeable_tlv_based_enum_upgradable!(HTLCDestination,
- (0, NextHopChannel) => {
- (0, node_id, required),
- (2, channel_id, required),
- },
- (1, InvalidForward) => {
- (0, requested_forward_scid, required),
- },
- (2, UnknownNextHop) => {
- (0, requested_forward_scid, required),
- },
- (4, FailedPayment) => {
- (0, payment_hash, required),
- },
-);
-
-#[cfg(anchors)]
-/// A descriptor used to sign for a commitment transaction's anchor output.
-#[derive(Clone, Debug)]
-pub struct AnchorDescriptor {
- /// A unique identifier used along with `channel_value_satoshis` to re-derive the
- /// [`InMemorySigner`] required to sign `input`.
- ///
- /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
- pub channel_keys_id: [u8; 32],
- /// The value in satoshis of the channel we're attempting to spend the anchor output of. This is
- /// used along with `channel_keys_id` to re-derive the [`InMemorySigner`] required to sign
- /// `input`.
- ///
- /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
- pub channel_value_satoshis: u64,
- /// The transaction input's outpoint corresponding to the commitment transaction's anchor
- /// output.
- pub outpoint: OutPoint,
-}
-
-#[cfg(anchors)]
-/// A descriptor used to sign for a commitment transaction's HTLC output.
-#[derive(Clone, Debug)]
-pub struct HTLCDescriptor {
- /// A unique identifier used along with `channel_value_satoshis` to re-derive the
- /// [`InMemorySigner`] required to sign `input`.
- ///
- /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
- pub channel_keys_id: [u8; 32],
- /// The value in satoshis of the channel we're attempting to spend the anchor output of. This is
- /// used along with `channel_keys_id` to re-derive the [`InMemorySigner`] required to sign
- /// `input`.
- ///
- /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
- pub channel_value_satoshis: u64,
- /// The necessary channel parameters that need to be provided to the re-derived
- /// [`InMemorySigner`] through [`BaseSign::provide_channel_parameters`].
- ///
- /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
- /// [`BaseSign::provide_channel_parameters`]: crate::chain::keysinterface::BaseSign::provide_channel_parameters
- pub channel_parameters: ChannelTransactionParameters,
- /// The txid of the commitment transaction in which the HTLC output lives.
- pub commitment_txid: Txid,
- /// The number of the commitment transaction in which the HTLC output lives.
- pub per_commitment_number: u64,
- /// The details of the HTLC as it appears in the commitment transaction.
- pub htlc: HTLCOutputInCommitment,
- /// The preimage, if `Some`, to claim the HTLC output with. If `None`, the timeout path must be
- /// taken.
- pub preimage: Option<PaymentPreimage>,
- /// The counterparty's signature required to spend the HTLC output.
- pub counterparty_sig: Signature
-}
-
-#[cfg(anchors)]
-impl HTLCDescriptor {
- /// Returns the unsigned transaction input spending the HTLC output in the commitment
- /// transaction.
- pub fn unsigned_tx_input(&self) -> TxIn {
- chan_utils::build_htlc_input(&self.commitment_txid, &self.htlc, true /* opt_anchors */)
- }
-
- /// Returns the delayed output created as a result of spending the HTLC output in the commitment
- /// transaction.
- pub fn tx_output<C: secp256k1::Signing + secp256k1::Verification>(
- &self, per_commitment_point: &PublicKey, secp: &Secp256k1<C>
- ) -> TxOut {
- let channel_params = self.channel_parameters.as_holder_broadcastable();
- let broadcaster_keys = channel_params.broadcaster_pubkeys();
- let counterparty_keys = channel_params.countersignatory_pubkeys();
- let broadcaster_delayed_key = chan_utils::derive_public_key(
- secp, per_commitment_point, &broadcaster_keys.delayed_payment_basepoint
- );
- let counterparty_revocation_key = chan_utils::derive_public_revocation_key(
- secp, per_commitment_point, &counterparty_keys.revocation_basepoint
- );
- chan_utils::build_htlc_output(
- 0 /* feerate_per_kw */, channel_params.contest_delay(), &self.htlc, true /* opt_anchors */,
- false /* use_non_zero_fee_anchors */, &broadcaster_delayed_key, &counterparty_revocation_key
- )
- }
-
- /// Returns the witness script of the HTLC output in the commitment transaction.
- pub fn witness_script<C: secp256k1::Signing + secp256k1::Verification>(
- &self, per_commitment_point: &PublicKey, secp: &Secp256k1<C>
- ) -> Script {
- let channel_params = self.channel_parameters.as_holder_broadcastable();
- let broadcaster_keys = channel_params.broadcaster_pubkeys();
- let counterparty_keys = channel_params.countersignatory_pubkeys();
- let broadcaster_htlc_key = chan_utils::derive_public_key(
- secp, per_commitment_point, &broadcaster_keys.htlc_basepoint
- );
- let counterparty_htlc_key = chan_utils::derive_public_key(
- secp, per_commitment_point, &counterparty_keys.htlc_basepoint
- );
- let counterparty_revocation_key = chan_utils::derive_public_revocation_key(
- secp, per_commitment_point, &counterparty_keys.revocation_basepoint
- );
- chan_utils::get_htlc_redeemscript_with_explicit_keys(
- &self.htlc, true /* opt_anchors */, &broadcaster_htlc_key, &counterparty_htlc_key,
- &counterparty_revocation_key,
- )
- }
-
- /// Returns the fully signed witness required to spend the HTLC output in the commitment
- /// transaction.
- pub fn tx_input_witness(&self, signature: &Signature, witness_script: &Script) -> Witness {
- chan_utils::build_htlc_input_witness(
- signature, &self.counterparty_sig, &self.preimage, witness_script, true /* opt_anchors */
- )
- }
-}
-
-#[cfg(anchors)]
-/// Represents the different types of transactions, originating from LDK, to be bumped.
-#[derive(Clone, Debug)]
-pub enum BumpTransactionEvent {
- /// Indicates that a channel featuring anchor outputs is to be closed by broadcasting the local
- /// commitment transaction. Since commitment transactions have a static feerate pre-agreed upon,
- /// they may need additional fees to be attached through a child transaction using the popular
- /// [Child-Pays-For-Parent](https://bitcoinops.org/en/topics/cpfp) fee bumping technique. This
- /// child transaction must include the anchor input described within `anchor_descriptor` along
- /// with additional inputs to meet the target feerate. Failure to meet the target feerate
- /// decreases the confirmation odds of the transaction package (which includes the commitment
- /// and child anchor transactions), possibly resulting in a loss of funds. Once the transaction
- /// is constructed, it must be fully signed for and broadcast by the consumer of the event
- /// along with the `commitment_tx` enclosed. Note that the `commitment_tx` must always be
- /// broadcast first, as the child anchor transaction depends on it.
- ///
- /// The consumer should be able to sign for any of the additional inputs included within the
- /// child anchor transaction. To sign its anchor input, an [`InMemorySigner`] should be
- /// re-derived through [`KeysManager::derive_channel_keys`] with the help of
- /// [`AnchorDescriptor::channel_keys_id`] and [`AnchorDescriptor::channel_value_satoshis`]. The
- /// anchor input signature can be computed with [`BaseSign::sign_holder_anchor_input`],
- /// which can then be provided to [`build_anchor_input_witness`] along with the `funding_pubkey`
- /// to obtain the full witness required to spend.
- ///
- /// It is possible to receive more than one instance of this event if a valid child anchor
- /// transaction is never broadcast or is but not with a sufficient fee to be mined. Care should
- /// be taken by the consumer of the event to ensure any future iterations of the child anchor
- /// transaction adhere to the [Replace-By-Fee
- /// rules](https://github.com/bitcoin/bitcoin/blob/master/doc/policy/mempool-replacements.md)
- /// for fee bumps to be accepted into the mempool, and eventually the chain. As the frequency of
- /// these events is not user-controlled, users may ignore/drop the event if they are no longer
- /// able to commit external confirmed funds to the child anchor transaction.
- ///
- /// The set of `pending_htlcs` on the commitment transaction to be broadcast can be inspected to
- /// determine whether a significant portion of the channel's funds are allocated to HTLCs,
- /// enabling users to make their own decisions regarding the importance of the commitment
- /// transaction's confirmation. Note that this is not required, but simply exists as an option
- /// for users to override LDK's behavior. On commitments with no HTLCs (indicated by those with
- /// an empty `pending_htlcs`), confirmation of the commitment transaction can be considered to
- /// be not urgent.
- ///
- /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
- /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys
- /// [`BaseSign::sign_holder_anchor_input`]: crate::chain::keysinterface::BaseSign::sign_holder_anchor_input
- /// [`build_anchor_input_witness`]: crate::ln::chan_utils::build_anchor_input_witness
- ChannelClose {
- /// The target feerate that the transaction package, which consists of the commitment
- /// transaction and the to-be-crafted child anchor transaction, must meet.
- package_target_feerate_sat_per_1000_weight: u32,
- /// The channel's commitment transaction to bump the fee of. This transaction should be
- /// broadcast along with the anchor transaction constructed as a result of consuming this
- /// event.
- commitment_tx: Transaction,
- /// The absolute fee in satoshis of the commitment transaction. This can be used along the
- /// with weight of the commitment transaction to determine its feerate.
- commitment_tx_fee_satoshis: u64,
- /// The descriptor to sign the anchor input of the anchor transaction constructed as a
- /// result of consuming this event.
- anchor_descriptor: AnchorDescriptor,
- /// The set of pending HTLCs on the commitment transaction that need to be resolved once the
- /// commitment transaction confirms.
- pending_htlcs: Vec<HTLCOutputInCommitment>,
- },
- /// Indicates that a channel featuring anchor outputs has unilaterally closed on-chain by a
- /// holder commitment transaction and its HTLC(s) need to be resolved on-chain. With the
- /// zero-HTLC-transaction-fee variant of anchor outputs, the pre-signed HTLC
- /// transactions have a zero fee, thus requiring additional inputs and/or outputs to be attached
- /// for a timely confirmation within the chain. These additional inputs and/or outputs must be
- /// appended to the resulting HTLC transaction to meet the target feerate. Failure to meet the
- /// target feerate decreases the confirmation odds of the transaction, possibly resulting in a
- /// loss of funds. Once the transaction meets the target feerate, it must be signed for and
- /// broadcast by the consumer of the event.
- ///
- /// The consumer should be able to sign for any of the non-HTLC inputs added to the resulting
- /// HTLC transaction. To sign HTLC inputs, an [`InMemorySigner`] should be re-derived through
- /// [`KeysManager::derive_channel_keys`] with the help of `channel_keys_id` and
- /// `channel_value_satoshis`. Each HTLC input's signature can be computed with
- /// [`BaseSign::sign_holder_htlc_transaction`], which can then be provided to
- /// [`HTLCDescriptor::tx_input_witness`] to obtain the fully signed witness required to spend.
- ///
- /// It is possible to receive more than one instance of this event if a valid HTLC transaction
- /// is never broadcast or is but not with a sufficient fee to be mined. Care should be taken by
- /// the consumer of the event to ensure any future iterations of the HTLC transaction adhere to
- /// the [Replace-By-Fee
- /// rules](https://github.com/bitcoin/bitcoin/blob/master/doc/policy/mempool-replacements.md)
- /// for fee bumps to be accepted into the mempool, and eventually the chain. As the frequency of
- /// these events is not user-controlled, users may ignore/drop the event if either they are no
- /// longer able to commit external confirmed funds to the HTLC transaction or the fee committed
- /// to the HTLC transaction is greater in value than the HTLCs being claimed.
- ///
- /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
- /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys
- /// [`BaseSign::sign_holder_htlc_transaction`]: crate::chain::keysinterface::BaseSign::sign_holder_htlc_transaction
- /// [`HTLCDescriptor::tx_input_witness`]: HTLCDescriptor::tx_input_witness
- HTLCResolution {
- target_feerate_sat_per_1000_weight: u32,
- htlc_descriptors: Vec<HTLCDescriptor>,
- },
-}
-
-/// Will be used in [`Event::HTLCIntercepted`] to identify the next hop in the HTLC's path.
-/// Currently only used in serialization for the sake of maintaining compatibility. More variants
-/// will be added for general-purpose HTLC forward intercepts as well as trampoline forward
-/// intercepts in upcoming work.
-enum InterceptNextHop {
- FakeScid {
- requested_next_hop_scid: u64,
- },
-}
-
-impl_writeable_tlv_based_enum!(InterceptNextHop,
- (0, FakeScid) => {
- (0, requested_next_hop_scid, required),
- };
-);
-
-/// An Event which you should probably take some action in response to.
-///
-/// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
-/// them directly as they don't round-trip exactly (for example FundingGenerationReady is never
-/// written as it makes no sense to respond to it after reconnecting to peers).
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum Event {
- /// Used to indicate that the client should generate a funding transaction with the given
- /// parameters and then call [`ChannelManager::funding_transaction_generated`].
- /// Generated in [`ChannelManager`] message handling.
- /// Note that *all inputs* in the funding transaction must spend SegWit outputs or your
- /// counterparty can steal your funds!
- ///
- /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
- FundingGenerationReady {
- /// The random channel_id we picked which you'll need to pass into
- /// [`ChannelManager::funding_transaction_generated`].
- ///
- /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
- temporary_channel_id: [u8; 32],
- /// The counterparty's node_id, which you'll need to pass back into
- /// [`ChannelManager::funding_transaction_generated`].
- ///
- /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
- counterparty_node_id: PublicKey,
- /// The value, in satoshis, that the output should have.
- channel_value_satoshis: u64,
- /// The script which should be used in the transaction output.
- output_script: Script,
- /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`], or a
- /// random value for an inbound channel. This may be zero for objects serialized with LDK
- /// versions prior to 0.0.113.
- ///
- /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
- user_channel_id: u128,
- },
- /// Indicates that we've been offered a payment and it needs to be claimed via calling
- /// [`ChannelManager::claim_funds`] with the preimage given in [`PaymentPurpose`].
- ///
- /// Note that if the preimage is not known, you should call
- /// [`ChannelManager::fail_htlc_backwards`] or [`ChannelManager::fail_htlc_backwards_with_reason`]
- /// to free up resources for this HTLC and avoid network congestion.
- /// If you fail to call either [`ChannelManager::claim_funds`], [`ChannelManager::fail_htlc_backwards`],
- /// or [`ChannelManager::fail_htlc_backwards_with_reason`] within the HTLC's timeout, the HTLC will be
- /// automatically failed.
- ///
- /// # Note
- /// LDK will not stop an inbound payment from being paid multiple times, so multiple
- /// `PaymentClaimable` events may be generated for the same payment.
- ///
- /// # Note
- /// This event used to be called `PaymentReceived` in LDK versions 0.0.112 and earlier.
- ///
- /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
- /// [`ChannelManager::fail_htlc_backwards`]: crate::ln::channelmanager::ChannelManager::fail_htlc_backwards
- /// [`ChannelManager::fail_htlc_backwards_with_reason`]: crate::ln::channelmanager::ChannelManager::fail_htlc_backwards_with_reason
- PaymentClaimable {
- /// The node that will receive the payment after it has been claimed.
- /// This is useful to identify payments received via [phantom nodes].
- /// This field will always be filled in when the event was generated by LDK versions
- /// 0.0.113 and above.
- ///
- /// [phantom nodes]: crate::chain::keysinterface::PhantomKeysManager
- receiver_node_id: Option<PublicKey>,
- /// The hash for which the preimage should be handed to the ChannelManager. Note that LDK will
- /// not stop you from registering duplicate payment hashes for inbound payments.
- payment_hash: PaymentHash,
- /// The value, in thousandths of a satoshi, that this payment is for.
- amount_msat: u64,
- /// Information for claiming this received payment, based on whether the purpose of the
- /// payment is to pay an invoice or to send a spontaneous payment.
- purpose: PaymentPurpose,
- /// The `channel_id` indicating over which channel we received the payment.
- via_channel_id: Option<[u8; 32]>,
- /// The `user_channel_id` indicating over which channel we received the payment.
- via_user_channel_id: Option<u128>,
- },
- /// Indicates a payment has been claimed and we've received money!
- ///
- /// This most likely occurs when [`ChannelManager::claim_funds`] has been called in response
- /// to an [`Event::PaymentClaimable`]. However, if we previously crashed during a
- /// [`ChannelManager::claim_funds`] call you may see this event without a corresponding
- /// [`Event::PaymentClaimable`] event.
- ///
- /// # Note
- /// LDK will not stop an inbound payment from being paid multiple times, so multiple
- /// `PaymentClaimable` events may be generated for the same payment. If you then call
- /// [`ChannelManager::claim_funds`] twice for the same [`Event::PaymentClaimable`] you may get
- /// multiple `PaymentClaimed` events.
- ///
- /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
- PaymentClaimed {
- /// The node that received the payment.
- /// This is useful to identify payments which were received via [phantom nodes].
- /// This field will always be filled in when the event was generated by LDK versions
- /// 0.0.113 and above.
- ///
- /// [phantom nodes]: crate::chain::keysinterface::PhantomKeysManager
- receiver_node_id: Option<PublicKey>,
- /// The payment hash of the claimed payment. Note that LDK will not stop you from
- /// registering duplicate payment hashes for inbound payments.
- payment_hash: PaymentHash,
- /// The value, in thousandths of a satoshi, that this payment is for.
- amount_msat: u64,
- /// The purpose of the claimed payment, i.e. whether the payment was for an invoice or a
- /// spontaneous payment.
- purpose: PaymentPurpose,
- },
- /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
- /// and we got back the payment preimage for it).
- ///
- /// Note for MPP payments: in rare cases, this event may be preceded by a `PaymentPathFailed`
- /// event. In this situation, you SHOULD treat this payment as having succeeded.
- PaymentSent {
- /// The id returned by [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_id: Option<PaymentId>,
- /// The preimage to the hash given to ChannelManager::send_payment.
- /// Note that this serves as a payment receipt, if you wish to have such a thing, you must
- /// store it somehow!
- payment_preimage: PaymentPreimage,
- /// The hash that was given to [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_hash: PaymentHash,
- /// The total fee which was spent at intermediate hops in this payment, across all paths.
- ///
- /// Note that, like [`Route::get_total_fees`] this does *not* include any potential
- /// overpayment to the recipient node.
- ///
- /// If the recipient or an intermediate node misbehaves and gives us free money, this may
- /// overstate the amount paid, though this is unlikely.
- ///
- /// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees
- fee_paid_msat: Option<u64>,
- },
- /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
- /// provide failure information for each path attempt in the payment, including retries.
- ///
- /// This event is provided once there are no further pending HTLCs for the payment and the
- /// payment is no longer retryable, due either to the [`Retry`] provided or
- /// [`ChannelManager::abandon_payment`] having been called for the corresponding payment.
- ///
- /// [`Retry`]: crate::ln::channelmanager::Retry
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- PaymentFailed {
- /// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::abandon_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- payment_id: PaymentId,
- /// The hash that was given to [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_hash: PaymentHash,
- },
- /// Indicates that a path for an outbound payment was successful.
- ///
- /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
- /// [`Event::PaymentSent`] for obtaining the payment preimage.
- PaymentPathSuccessful {
- /// The id returned by [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_id: PaymentId,
- /// The hash that was given to [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_hash: Option<PaymentHash>,
- /// The payment path that was successful.
- ///
- /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
- path: Vec<RouteHop>,
- },
- /// Indicates an outbound HTLC we sent failed, likely due to an intermediary node being unable to
- /// handle the HTLC.
- ///
- /// Note that this does *not* indicate that all paths for an MPP payment have failed, see
- /// [`Event::PaymentFailed`].
- ///
- /// See [`ChannelManager::abandon_payment`] for giving up on this payment before its retries have
- /// been exhausted.
- ///
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- PaymentPathFailed {
- /// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::abandon_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- payment_id: Option<PaymentId>,
- /// The hash that was given to [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_hash: PaymentHash,
- /// Indicates the payment was rejected for some reason by the recipient. This implies that
- /// the payment has failed, not just the route in question. If this is not set, the payment may
- /// be retried via a different route.
- payment_failed_permanently: bool,
- /// Extra error details based on the failure type. May contain an update that needs to be
- /// applied to the [`NetworkGraph`].
- ///
- /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
- failure: PathFailure,
- /// The payment path that failed.
- path: Vec<RouteHop>,
- /// The channel responsible for the failed payment path.
- ///
- /// Note that for route hints or for the first hop in a path this may be an SCID alias and
- /// may not refer to a channel in the public network graph. These aliases may also collide
- /// with channels in the public network graph.
- ///
- /// If this is `Some`, then the corresponding channel should be avoided when the payment is
- /// retried. May be `None` for older [`Event`] serializations.
- short_channel_id: Option<u64>,
- /// Parameters used by LDK to compute a new [`Route`] when retrying the failed payment path.
- ///
- /// [`Route`]: crate::routing::router::Route
- retry: Option<RouteParameters>,
-#[cfg(test)]
- error_code: Option<u16>,
-#[cfg(test)]
- error_data: Option<Vec<u8>>,
- },
- /// Indicates that a probe payment we sent returned successful, i.e., only failed at the destination.
- ProbeSuccessful {
- /// The id returned by [`ChannelManager::send_probe`].
- ///
- /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
- payment_id: PaymentId,
- /// The hash generated by [`ChannelManager::send_probe`].
- ///
- /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
- payment_hash: PaymentHash,
- /// The payment path that was successful.
- path: Vec<RouteHop>,
- },
- /// Indicates that a probe payment we sent failed at an intermediary node on the path.
- ProbeFailed {
- /// The id returned by [`ChannelManager::send_probe`].
- ///
- /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
- payment_id: PaymentId,
- /// The hash generated by [`ChannelManager::send_probe`].
- ///
- /// [`ChannelManager::send_probe`]: crate::ln::channelmanager::ChannelManager::send_probe
- payment_hash: PaymentHash,
- /// The payment path that failed.
- path: Vec<RouteHop>,
- /// The channel responsible for the failed probe.
- ///
- /// Note that for route hints or for the first hop in a path this may be an SCID alias and
- /// may not refer to a channel in the public network graph. These aliases may also collide
- /// with channels in the public network graph.
- short_channel_id: Option<u64>,
- },
- /// Used to indicate that [`ChannelManager::process_pending_htlc_forwards`] should be called at
- /// a time in the future.
- ///
- /// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
- PendingHTLCsForwardable {
- /// The minimum amount of time that should be waited prior to calling
- /// process_pending_htlc_forwards. To increase the effort required to correlate payments,
- /// you should wait a random amount of time in roughly the range (now + time_forwardable,
- /// now + 5*time_forwardable).
- time_forwardable: Duration,
- },
- /// Used to indicate that we've intercepted an HTLC forward. This event will only be generated if
- /// you've encoded an intercept scid in the receiver's invoice route hints using
- /// [`ChannelManager::get_intercept_scid`] and have set [`UserConfig::accept_intercept_htlcs`].
- ///
- /// [`ChannelManager::forward_intercepted_htlc`] or
- /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event. See
- /// their docs for more information.
- ///
- /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid
- /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
- /// [`ChannelManager::forward_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::forward_intercepted_htlc
- /// [`ChannelManager::fail_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::fail_intercepted_htlc
- HTLCIntercepted {
- /// An id to help LDK identify which HTLC is being forwarded or failed.
- intercept_id: InterceptId,
- /// The fake scid that was programmed as the next hop's scid, generated using
- /// [`ChannelManager::get_intercept_scid`].
- ///
- /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid
- requested_next_hop_scid: u64,
- /// The payment hash used for this HTLC.
- payment_hash: PaymentHash,
- /// How many msats were received on the inbound edge of this HTLC.
- inbound_amount_msat: u64,
- /// How many msats the payer intended to route to the next node. Depending on the reason you are
- /// intercepting this payment, you might take a fee by forwarding less than this amount.
- ///
- /// Note that LDK will NOT check that expected fees were factored into this value. You MUST
- /// check that whatever fee you want has been included here or subtract it as required. Further,
- /// LDK will not stop you from forwarding more than you received.
- expected_outbound_amount_msat: u64,
- },
- /// Used to indicate that an output which you should know how to spend was confirmed on chain
- /// and is now spendable.
- /// Such an output will *not* ever be spent by rust-lightning, and are not at risk of your
- /// counterparty spending them due to some kind of timeout. Thus, you need to store them
- /// somewhere and spend them when you create on-chain transactions.
- SpendableOutputs {
- /// The outputs which you should store as spendable by you.
- outputs: Vec<SpendableOutputDescriptor>,
- },
- /// This event is generated when a payment has been successfully forwarded through us and a
- /// forwarding fee earned.
- PaymentForwarded {
- /// The incoming channel between the previous node and us. This is only `None` for events
- /// generated or serialized by versions prior to 0.0.107.
- prev_channel_id: Option<[u8; 32]>,
- /// The outgoing channel between the next node and us. This is only `None` for events
- /// generated or serialized by versions prior to 0.0.107.
- next_channel_id: Option<[u8; 32]>,
- /// The fee, in milli-satoshis, which was earned as a result of the payment.
- ///
- /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
- /// was pending, the amount the next hop claimed will have been rounded down to the nearest
- /// whole satoshi. Thus, the fee calculated here may be higher than expected as we still
- /// claimed the full value in millisatoshis from the source. In this case,
- /// `claim_from_onchain_tx` will be set.
- ///
- /// If the channel which sent us the payment has been force-closed, we will claim the funds
- /// via an on-chain transaction. In that case we do not yet know the on-chain transaction
- /// fees which we will spend and will instead set this to `None`. It is possible duplicate
- /// `PaymentForwarded` events are generated for the same payment iff `fee_earned_msat` is
- /// `None`.
- fee_earned_msat: Option<u64>,
- /// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
- /// transaction.
- claim_from_onchain_tx: bool,
- },
- /// Used to indicate that a channel with the given `channel_id` is ready to
- /// be used. This event is emitted either when the funding transaction has been confirmed
- /// on-chain, or, in case of a 0conf channel, when both parties have confirmed the channel
- /// establishment.
- ChannelReady {
- /// The channel_id of the channel that is ready.
- channel_id: [u8; 32],
- /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel.
- ///
- /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
- /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
- user_channel_id: u128,
- /// The node_id of the channel counterparty.
- counterparty_node_id: PublicKey,
- /// The features that this channel will operate with.
- channel_type: ChannelTypeFeatures,
- },
- /// Used to indicate that a previously opened channel with the given `channel_id` is in the
- /// process of closure.
- ChannelClosed {
- /// The channel_id of the channel which has been closed. Note that on-chain transactions
- /// resolving the channel are likely still awaiting confirmation.
- channel_id: [u8; 32],
- /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for inbound channels.
- /// This may be zero for inbound channels serialized prior to 0.0.113 and will always be
- /// zero for objects serialized with LDK versions prior to 0.0.102.
- ///
- /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
- /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
- user_channel_id: u128,
- /// The reason the channel was closed.
- reason: ClosureReason
- },
- /// Used to indicate to the user that they can abandon the funding transaction and recycle the
- /// inputs for another purpose.
- DiscardFunding {
- /// The channel_id of the channel which has been closed.
- channel_id: [u8; 32],
- /// The full transaction received from the user
- transaction: Transaction
- },
- /// Indicates a request to open a new channel by a peer.
- ///
- /// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
- /// request, call [`ChannelManager::force_close_without_broadcasting_txn`].
- ///
- /// The event is only triggered when a new open channel request is received and the
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
- ///
- /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
- OpenChannelRequest {
- /// The temporary channel ID of the channel requested to be opened.
- ///
- /// When responding to the request, the `temporary_channel_id` should be passed
- /// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept,
- /// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject.
- ///
- /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
- temporary_channel_id: [u8; 32],
- /// The node_id of the counterparty requesting to open the channel.
- ///
- /// When responding to the request, the `counterparty_node_id` should be passed
- /// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to
- /// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the
- /// request.
- ///
- /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
- counterparty_node_id: PublicKey,
- /// The channel value of the requested channel.
- funding_satoshis: u64,
- /// Our starting balance in the channel if the request is accepted, in milli-satoshi.
- push_msat: u64,
- /// The features that this channel will operate with. If you reject the channel, a
- /// well-behaved counterparty may automatically re-attempt the channel with a new set of
- /// feature flags.
- ///
- /// Note that if [`ChannelTypeFeatures::supports_scid_privacy`] returns true on this type,
- /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to
- /// 0.0.106.
- ///
- /// Furthermore, note that if [`ChannelTypeFeatures::supports_zero_conf`] returns true on this type,
- /// the resulting [`ChannelManager`] will not be readable by versions of LDK prior to
- /// 0.0.107. Channels setting this type also need to get manually accepted via
- /// [`crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`],
- /// or will be rejected otherwise.
- ///
- /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- channel_type: ChannelTypeFeatures,
- },
- /// Indicates that the HTLC was accepted, but could not be processed when or after attempting to
- /// forward it.
- ///
- /// Some scenarios where this event may be sent include:
- /// * Insufficient capacity in the outbound channel
- /// * While waiting to forward the HTLC, the channel it is meant to be forwarded through closes
- /// * When an unknown SCID is requested for forwarding a payment.
- /// * Claiming an amount for an MPP payment that exceeds the HTLC total
- /// * The HTLC has timed out
- ///
- /// This event, however, does not get generated if an HTLC fails to meet the forwarding
- /// requirements (i.e. insufficient fees paid, or a CLTV that is too soon).
- HTLCHandlingFailed {
- /// The channel over which the HTLC was received.
- prev_channel_id: [u8; 32],
- /// Destination of the HTLC that failed to be processed.
- failed_next_destination: HTLCDestination,
- },
- #[cfg(anchors)]
- /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event
- /// requires confirmed external funds to be readily available to spend.
- ///
- /// LDK does not currently generate this event. It is limited to the scope of channels with
- /// anchor outputs, which will be introduced in a future release.
- BumpTransaction(BumpTransactionEvent),
-}
-
-impl Writeable for Event {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- match self {
- &Event::FundingGenerationReady { .. } => {
- 0u8.write(writer)?;
- // We never write out FundingGenerationReady events as, upon disconnection, peers
- // drop any channels which have not yet exchanged funding_signed.
- },
- &Event::PaymentClaimable { ref payment_hash, ref amount_msat, ref purpose, ref receiver_node_id, ref via_channel_id, ref via_user_channel_id } => {
- 1u8.write(writer)?;
- let mut payment_secret = None;
- let payment_preimage;
- match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage: preimage, payment_secret: secret } => {
- payment_secret = Some(secret);
- payment_preimage = *preimage;
- },
- PaymentPurpose::SpontaneousPayment(preimage) => {
- payment_preimage = Some(*preimage);
- }
- }
- write_tlv_fields!(writer, {
- (0, payment_hash, required),
- (1, receiver_node_id, option),
- (2, payment_secret, option),
- (3, via_channel_id, option),
- (4, amount_msat, required),
- (5, via_user_channel_id, option),
- (6, 0u64, required), // user_payment_id required for compatibility with 0.0.103 and earlier
- (8, payment_preimage, option),
- });
- },
- &Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => {
- 2u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, payment_preimage, required),
- (1, payment_hash, required),
- (3, payment_id, option),
- (5, fee_paid_msat, option),
- });
- },
- &Event::PaymentPathFailed {
- ref payment_id, ref payment_hash, ref payment_failed_permanently, ref failure,
- ref path, ref short_channel_id, ref retry,
- #[cfg(test)]
- ref error_code,
- #[cfg(test)]
- ref error_data,
- } => {
- 3u8.write(writer)?;
- #[cfg(test)]
- error_code.write(writer)?;
- #[cfg(test)]
- error_data.write(writer)?;
- write_tlv_fields!(writer, {
- (0, payment_hash, required),
- (1, None::<NetworkUpdate>, option), // network_update in LDK versions prior to 0.0.114
- (2, payment_failed_permanently, required),
- (3, false, required), // all_paths_failed in LDK versions prior to 0.0.114
- (5, *path, vec_type),
- (7, short_channel_id, option),
- (9, retry, option),
- (11, payment_id, option),
- (13, failure, required),
- });
- },
- &Event::PendingHTLCsForwardable { time_forwardable: _ } => {
- 4u8.write(writer)?;
- // Note that we now ignore these on the read end as we'll re-generate them in
- // ChannelManager, we write them here only for backwards compatibility.
- },
- &Event::SpendableOutputs { ref outputs } => {
- 5u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, WithoutLength(outputs), required),
- });
- },
- &Event::HTLCIntercepted { requested_next_hop_scid, payment_hash, inbound_amount_msat, expected_outbound_amount_msat, intercept_id } => {
- 6u8.write(writer)?;
- let intercept_scid = InterceptNextHop::FakeScid { requested_next_hop_scid };
- write_tlv_fields!(writer, {
- (0, intercept_id, required),
- (2, intercept_scid, required),
- (4, payment_hash, required),
- (6, inbound_amount_msat, required),
- (8, expected_outbound_amount_msat, required),
- });
- }
- &Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
- 7u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, fee_earned_msat, option),
- (1, prev_channel_id, option),
- (2, claim_from_onchain_tx, required),
- (3, next_channel_id, option),
- });
- },
- &Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason } => {
- 9u8.write(writer)?;
- // `user_channel_id` used to be a single u64 value. In order to remain backwards
- // compatible with versions prior to 0.0.113, the u128 is serialized as two
- // separate u64 values.
- let user_channel_id_low = *user_channel_id as u64;
- let user_channel_id_high = (*user_channel_id >> 64) as u64;
- write_tlv_fields!(writer, {
- (0, channel_id, required),
- (1, user_channel_id_low, required),
- (2, reason, required),
- (3, user_channel_id_high, required),
- });
- },
- &Event::DiscardFunding { ref channel_id, ref transaction } => {
- 11u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, channel_id, required),
- (2, transaction, required)
- })
- },
- &Event::PaymentPathSuccessful { ref payment_id, ref payment_hash, ref path } => {
- 13u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, payment_id, required),
- (2, payment_hash, option),
- (4, *path, vec_type)
- })
- },
- &Event::PaymentFailed { ref payment_id, ref payment_hash } => {
- 15u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, payment_id, required),
- (2, payment_hash, required),
- })
- },
- &Event::OpenChannelRequest { .. } => {
- 17u8.write(writer)?;
- // We never write the OpenChannelRequest events as, upon disconnection, peers
- // drop any channels which have not yet exchanged funding_signed.
- },
- &Event::PaymentClaimed { ref payment_hash, ref amount_msat, ref purpose, ref receiver_node_id } => {
- 19u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, payment_hash, required),
- (1, receiver_node_id, option),
- (2, purpose, required),
- (4, amount_msat, required),
- });
- },
- &Event::ProbeSuccessful { ref payment_id, ref payment_hash, ref path } => {
- 21u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, payment_id, required),
- (2, payment_hash, required),
- (4, *path, vec_type)
- })
- },
- &Event::ProbeFailed { ref payment_id, ref payment_hash, ref path, ref short_channel_id } => {
- 23u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, payment_id, required),
- (2, payment_hash, required),
- (4, *path, vec_type),
- (6, short_channel_id, option),
- })
- },
- &Event::HTLCHandlingFailed { ref prev_channel_id, ref failed_next_destination } => {
- 25u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, prev_channel_id, required),
- (2, failed_next_destination, required),
- })
- },
- #[cfg(anchors)]
- &Event::BumpTransaction(ref event)=> {
- 27u8.write(writer)?;
- match event {
- // We never write the ChannelClose|HTLCResolution events as they'll be replayed
- // upon restarting anyway if they remain unresolved.
- BumpTransactionEvent::ChannelClose { .. } => {}
- BumpTransactionEvent::HTLCResolution { .. } => {}
- }
- write_tlv_fields!(writer, {}); // Write a length field for forwards compat
- }
- &Event::ChannelReady { ref channel_id, ref user_channel_id, ref counterparty_node_id, ref channel_type } => {
- 29u8.write(writer)?;
- write_tlv_fields!(writer, {
- (0, channel_id, required),
- (2, user_channel_id, required),
- (4, counterparty_node_id, required),
- (6, channel_type, required),
- });
- },
- // Note that, going forward, all new events must only write data inside of
- // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
- // data via `write_tlv_fields`.
- }
- Ok(())
- }
-}
-impl MaybeReadable for Event {
- fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
- match Readable::read(reader)? {
- // Note that we do not write a length-prefixed TLV for FundingGenerationReady events,
- // unlike all other events, thus we return immediately here.
- 0u8 => Ok(None),
- 1u8 => {
- let f = || {
- let mut payment_hash = PaymentHash([0; 32]);
- let mut payment_preimage = None;
- let mut payment_secret = None;
- let mut amount_msat = 0;
- let mut receiver_node_id = None;
- let mut _user_payment_id = None::<u64>; // For compatibility with 0.0.103 and earlier
- let mut via_channel_id = None;
- let mut via_user_channel_id = None;
- read_tlv_fields!(reader, {
- (0, payment_hash, required),
- (1, receiver_node_id, option),
- (2, payment_secret, option),
- (3, via_channel_id, option),
- (4, amount_msat, required),
- (5, via_user_channel_id, option),
- (6, _user_payment_id, option),
- (8, payment_preimage, option),
- });
- let purpose = match payment_secret {
- Some(secret) => PaymentPurpose::InvoicePayment {
- payment_preimage,
- payment_secret: secret
- },
- None if payment_preimage.is_some() => PaymentPurpose::SpontaneousPayment(payment_preimage.unwrap()),
- None => return Err(msgs::DecodeError::InvalidValue),
- };
- Ok(Some(Event::PaymentClaimable {
- receiver_node_id,
- payment_hash,
- amount_msat,
- purpose,
- via_channel_id,
- via_user_channel_id,
- }))
- };
- f()
- },
- 2u8 => {
- let f = || {
- let mut payment_preimage = PaymentPreimage([0; 32]);
- let mut payment_hash = None;
- let mut payment_id = None;
- let mut fee_paid_msat = None;
- read_tlv_fields!(reader, {
- (0, payment_preimage, required),
- (1, payment_hash, option),
- (3, payment_id, option),
- (5, fee_paid_msat, option),
- });
- if payment_hash.is_none() {
- payment_hash = Some(PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()));
- }
- Ok(Some(Event::PaymentSent {
- payment_id,
- payment_preimage,
- payment_hash: payment_hash.unwrap(),
- fee_paid_msat,
- }))
- };
- f()
- },
- 3u8 => {
- let f = || {
- #[cfg(test)]
- let error_code = Readable::read(reader)?;
- #[cfg(test)]
- let error_data = Readable::read(reader)?;
- let mut payment_hash = PaymentHash([0; 32]);
- let mut payment_failed_permanently = false;
- let mut network_update = None;
- let mut path: Option<Vec<RouteHop>> = Some(vec![]);
- let mut short_channel_id = None;
- let mut retry = None;
- let mut payment_id = None;
- let mut failure_opt = None;
- read_tlv_fields!(reader, {
- (0, payment_hash, required),
- (1, network_update, upgradable_option),
- (2, payment_failed_permanently, required),
- (5, path, vec_type),
- (7, short_channel_id, option),
- (9, retry, option),
- (11, payment_id, option),
- (13, failure_opt, upgradable_option),
- });
- let failure = failure_opt.unwrap_or_else(|| PathFailure::OnPath { network_update });
- Ok(Some(Event::PaymentPathFailed {
- payment_id,
- payment_hash,
- payment_failed_permanently,
- failure,
- path: path.unwrap(),
- short_channel_id,
- retry,
- #[cfg(test)]
- error_code,
- #[cfg(test)]
- error_data,
- }))
- };
- f()
- },
- 4u8 => Ok(None),
- 5u8 => {
- let f = || {
- let mut outputs = WithoutLength(Vec::new());
- read_tlv_fields!(reader, {
- (0, outputs, required),
- });
- Ok(Some(Event::SpendableOutputs { outputs: outputs.0 }))
- };
- f()
- },
- 6u8 => {
- let mut payment_hash = PaymentHash([0; 32]);
- let mut intercept_id = InterceptId([0; 32]);
- let mut requested_next_hop_scid = InterceptNextHop::FakeScid { requested_next_hop_scid: 0 };
- let mut inbound_amount_msat = 0;
- let mut expected_outbound_amount_msat = 0;
- read_tlv_fields!(reader, {
- (0, intercept_id, required),
- (2, requested_next_hop_scid, required),
- (4, payment_hash, required),
- (6, inbound_amount_msat, required),
- (8, expected_outbound_amount_msat, required),
- });
- let next_scid = match requested_next_hop_scid {
- InterceptNextHop::FakeScid { requested_next_hop_scid: scid } => scid
- };
- Ok(Some(Event::HTLCIntercepted {
- payment_hash,
- requested_next_hop_scid: next_scid,
- inbound_amount_msat,
- expected_outbound_amount_msat,
- intercept_id,
- }))
- },
- 7u8 => {
- let f = || {
- let mut fee_earned_msat = None;
- let mut prev_channel_id = None;
- let mut claim_from_onchain_tx = false;
- let mut next_channel_id = None;
- read_tlv_fields!(reader, {
- (0, fee_earned_msat, option),
- (1, prev_channel_id, option),
- (2, claim_from_onchain_tx, required),
- (3, next_channel_id, option),
- });
- Ok(Some(Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id }))
- };
- f()
- },
- 9u8 => {
- let f = || {
- let mut channel_id = [0; 32];
- let mut reason = UpgradableRequired(None);
- let mut user_channel_id_low_opt: Option<u64> = None;
- let mut user_channel_id_high_opt: Option<u64> = None;
- read_tlv_fields!(reader, {
- (0, channel_id, required),
- (1, user_channel_id_low_opt, option),
- (2, reason, upgradable_required),
- (3, user_channel_id_high_opt, option),
- });
-
- // `user_channel_id` used to be a single u64 value. In order to remain
- // backwards compatible with versions prior to 0.0.113, the u128 is serialized
- // as two separate u64 values.
- let user_channel_id = (user_channel_id_low_opt.unwrap_or(0) as u128) +
- ((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);
-
- Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required) }))
- };
- f()
- },
- 11u8 => {
- let f = || {
- let mut channel_id = [0; 32];
- let mut transaction = Transaction{ version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
- read_tlv_fields!(reader, {
- (0, channel_id, required),
- (2, transaction, required),
- });
- Ok(Some(Event::DiscardFunding { channel_id, transaction } ))
- };
- f()
- },
- 13u8 => {
- let f = || {
- let mut payment_id = PaymentId([0; 32]);
- let mut payment_hash = None;
- let mut path: Option<Vec<RouteHop>> = Some(vec![]);
- read_tlv_fields!(reader, {
- (0, payment_id, required),
- (2, payment_hash, option),
- (4, path, vec_type),
- });
- Ok(Some(Event::PaymentPathSuccessful {
- payment_id,
- payment_hash,
- path: path.unwrap(),
- }))
- };
- f()
- },
- 15u8 => {
- let f = || {
- let mut payment_hash = PaymentHash([0; 32]);
- let mut payment_id = PaymentId([0; 32]);
- read_tlv_fields!(reader, {
- (0, payment_id, required),
- (2, payment_hash, required),
- });
- Ok(Some(Event::PaymentFailed {
- payment_id,
- payment_hash,
- }))
- };
- f()
- },
- 17u8 => {
- // Value 17 is used for `Event::OpenChannelRequest`.
- Ok(None)
- },
- 19u8 => {
- let f = || {
- let mut payment_hash = PaymentHash([0; 32]);
- let mut purpose = UpgradableRequired(None);
- let mut amount_msat = 0;
- let mut receiver_node_id = None;
- read_tlv_fields!(reader, {
- (0, payment_hash, required),
- (1, receiver_node_id, option),
- (2, purpose, upgradable_required),
- (4, amount_msat, required),
- });
- Ok(Some(Event::PaymentClaimed {
- receiver_node_id,
- payment_hash,
- purpose: _init_tlv_based_struct_field!(purpose, upgradable_required),
- amount_msat,
- }))
- };
- f()
- },
- 21u8 => {
- let f = || {
- let mut payment_id = PaymentId([0; 32]);
- let mut payment_hash = PaymentHash([0; 32]);
- let mut path: Option<Vec<RouteHop>> = Some(vec![]);
- read_tlv_fields!(reader, {
- (0, payment_id, required),
- (2, payment_hash, required),
- (4, path, vec_type),
- });
- Ok(Some(Event::ProbeSuccessful {
- payment_id,
- payment_hash,
- path: path.unwrap(),
- }))
- };
- f()
- },
- 23u8 => {
- let f = || {
- let mut payment_id = PaymentId([0; 32]);
- let mut payment_hash = PaymentHash([0; 32]);
- let mut path: Option<Vec<RouteHop>> = Some(vec![]);
- let mut short_channel_id = None;
- read_tlv_fields!(reader, {
- (0, payment_id, required),
- (2, payment_hash, required),
- (4, path, vec_type),
- (6, short_channel_id, option),
- });
- Ok(Some(Event::ProbeFailed {
- payment_id,
- payment_hash,
- path: path.unwrap(),
- short_channel_id,
- }))
- };
- f()
- },
- 25u8 => {
- let f = || {
- let mut prev_channel_id = [0; 32];
- let mut failed_next_destination_opt = UpgradableRequired(None);
- read_tlv_fields!(reader, {
- (0, prev_channel_id, required),
- (2, failed_next_destination_opt, upgradable_required),
- });
- Ok(Some(Event::HTLCHandlingFailed {
- prev_channel_id,
- failed_next_destination: _init_tlv_based_struct_field!(failed_next_destination_opt, upgradable_required),
- }))
- };
- f()
- },
- 27u8 => Ok(None),
- 29u8 => {
- let f = || {
- let mut channel_id = [0; 32];
- let mut user_channel_id: u128 = 0;
- let mut counterparty_node_id = RequiredWrapper(None);
- let mut channel_type = RequiredWrapper(None);
- read_tlv_fields!(reader, {
- (0, channel_id, required),
- (2, user_channel_id, required),
- (4, counterparty_node_id, required),
- (6, channel_type, required),
- });
-
- Ok(Some(Event::ChannelReady {
- channel_id,
- user_channel_id,
- counterparty_node_id: counterparty_node_id.0.unwrap(),
- channel_type: channel_type.0.unwrap()
- }))
- };
- f()
- },
- // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
- // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
- // reads.
- x if x % 2 == 1 => {
- // If the event is of unknown type, assume it was written with `write_tlv_fields`,
- // which prefixes the whole thing with a length BigSize. Because the event is
- // odd-type unknown, we should treat it as `Ok(None)` even if it has some TLV
- // fields that are even. Thus, we avoid using `read_tlv_fields` and simply read
- // exactly the number of bytes specified, ignoring them entirely.
- let tlv_len: BigSize = Readable::read(reader)?;
- FixedLengthReader::new(reader, tlv_len.0)
- .eat_remaining().map_err(|_| msgs::DecodeError::ShortRead)?;
- Ok(None)
- },
- _ => Err(msgs::DecodeError::InvalidValue)
- }
- }
-}
-
-/// An event generated by ChannelManager which indicates a message should be sent to a peer (or
-/// broadcast to most peers).
-/// These events are handled by PeerManager::process_events if you are using a PeerManager.
-#[derive(Clone, Debug)]
-pub enum MessageSendEvent {
- /// Used to indicate that we've accepted a channel open and should send the accept_channel
- /// message provided to the given peer.
- SendAcceptChannel {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::AcceptChannel,
- },
- /// Used to indicate that we've initiated a channel open and should send the open_channel
- /// message provided to the given peer.
- SendOpenChannel {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::OpenChannel,
- },
- /// Used to indicate that a funding_created message should be sent to the peer with the given node_id.
- SendFundingCreated {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::FundingCreated,
- },
- /// Used to indicate that a funding_signed message should be sent to the peer with the given node_id.
- SendFundingSigned {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::FundingSigned,
- },
- /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id.
- SendChannelReady {
- /// The node_id of the node which should receive these message(s)
- node_id: PublicKey,
- /// The channel_ready message which should be sent.
- msg: msgs::ChannelReady,
- },
- /// Used to indicate that an announcement_signatures message should be sent to the peer with the given node_id.
- SendAnnouncementSignatures {
- /// The node_id of the node which should receive these message(s)
- node_id: PublicKey,
- /// The announcement_signatures message which should be sent.
- msg: msgs::AnnouncementSignatures,
- },
- /// Used to indicate that a series of HTLC update messages, as well as a commitment_signed
- /// message should be sent to the peer with the given node_id.
- UpdateHTLCs {
- /// The node_id of the node which should receive these message(s)
- node_id: PublicKey,
- /// The update messages which should be sent. ALL messages in the struct should be sent!
- updates: msgs::CommitmentUpdate,
- },
- /// Used to indicate that a revoke_and_ack message should be sent to the peer with the given node_id.
- SendRevokeAndACK {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::RevokeAndACK,
- },
- /// Used to indicate that a closing_signed message should be sent to the peer with the given node_id.
- SendClosingSigned {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::ClosingSigned,
- },
- /// Used to indicate that a shutdown message should be sent to the peer with the given node_id.
- SendShutdown {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::Shutdown,
- },
- /// Used to indicate that a channel_reestablish message should be sent to the peer with the given node_id.
- SendChannelReestablish {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The message which should be sent.
- msg: msgs::ChannelReestablish,
- },
- /// Used to send a channel_announcement and channel_update to a specific peer, likely on
- /// initial connection to ensure our peers know about our channels.
- SendChannelAnnouncement {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The channel_announcement which should be sent.
- msg: msgs::ChannelAnnouncement,
- /// The followup channel_update which should be sent.
- update_msg: msgs::ChannelUpdate,
- },
- /// Used to indicate that a channel_announcement and channel_update should be broadcast to all
- /// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2).
- ///
- /// Note that after doing so, you very likely (unless you did so very recently) want to
- /// broadcast a node_announcement (e.g. via [`PeerManager::broadcast_node_announcement`]). This
- /// ensures that any nodes which see our channel_announcement also have a relevant
- /// node_announcement, including relevant feature flags which may be important for routing
- /// through or to us.
- ///
- /// [`PeerManager::broadcast_node_announcement`]: crate::ln::peer_handler::PeerManager::broadcast_node_announcement
- BroadcastChannelAnnouncement {
- /// The channel_announcement which should be sent.
- msg: msgs::ChannelAnnouncement,
- /// The followup channel_update which should be sent.
- update_msg: Option<msgs::ChannelUpdate>,
- },
- /// Used to indicate that a channel_update should be broadcast to all peers.
- BroadcastChannelUpdate {
- /// The channel_update which should be sent.
- msg: msgs::ChannelUpdate,
- },
- /// Used to indicate that a node_announcement should be broadcast to all peers.
- BroadcastNodeAnnouncement {
- /// The node_announcement which should be sent.
- msg: msgs::NodeAnnouncement,
- },
- /// Used to indicate that a channel_update should be sent to a single peer.
- /// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is a
- /// private channel and we shouldn't be informing all of our peers of channel parameters.
- SendChannelUpdate {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The channel_update which should be sent.
- msg: msgs::ChannelUpdate,
- },
- /// Broadcast an error downstream to be handled
- HandleError {
- /// The node_id of the node which should receive this message
- node_id: PublicKey,
- /// The action which should be taken.
- action: msgs::ErrorAction
- },
- /// Query a peer for channels with funding transaction UTXOs in a block range.
- SendChannelRangeQuery {
- /// The node_id of this message recipient
- node_id: PublicKey,
- /// The query_channel_range which should be sent.
- msg: msgs::QueryChannelRange,
- },
- /// Request routing gossip messages from a peer for a list of channels identified by
- /// their short_channel_ids.
- SendShortIdsQuery {
- /// The node_id of this message recipient
- node_id: PublicKey,
- /// The query_short_channel_ids which should be sent.
- msg: msgs::QueryShortChannelIds,
- },
- /// Sends a reply to a channel range query. This may be one of several SendReplyChannelRange events
- /// emitted during processing of the query.
- SendReplyChannelRange {
- /// The node_id of this message recipient
- node_id: PublicKey,
- /// The reply_channel_range which should be sent.
- msg: msgs::ReplyChannelRange,
- },
- /// Sends a timestamp filter for inbound gossip. This should be sent on each new connection to
- /// enable receiving gossip messages from the peer.
- SendGossipTimestampFilter {
- /// The node_id of this message recipient
- node_id: PublicKey,
- /// The gossip_timestamp_filter which should be sent.
- msg: msgs::GossipTimestampFilter,
- },
-}
-
-/// A trait indicating an object may generate message send events
-pub trait MessageSendEventsProvider {
- /// Gets the list of pending events which were generated by previous actions, clearing the list
- /// in the process.
- fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent>;
-}
-
-/// A trait indicating an object may generate onion messages to send
-pub trait OnionMessageProvider {
- /// Gets the next pending onion message for the peer with the given node id.
- fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage>;
-}
-
-/// A trait indicating an object may generate events.
-///
-/// Events are processed by passing an [`EventHandler`] to [`process_pending_events`].
-///
-/// Implementations of this trait may also feature an async version of event handling, as shown with
-/// [`ChannelManager::process_pending_events_async`] and
-/// [`ChainMonitor::process_pending_events_async`].
-///
-/// # Requirements
-///
-/// When using this trait, [`process_pending_events`] will call [`handle_event`] for each pending
-/// event since the last invocation.
-///
-/// In order to ensure no [`Event`]s are lost, implementors of this trait will persist [`Event`]s
-/// and replay any unhandled events on startup. An [`Event`] is considered handled when
-/// [`process_pending_events`] returns, thus handlers MUST fully handle [`Event`]s and persist any
-/// relevant changes to disk *before* returning.
-///
-/// Further, because an application may crash between an [`Event`] being handled and the
-/// implementor of this trait being re-serialized, [`Event`] handling must be idempotent - in
-/// effect, [`Event`]s may be replayed.
-///
-/// Note, handlers may call back into the provider and thus deadlocking must be avoided. Be sure to
-/// consult the provider's documentation on the implication of processing events and how a handler
-/// may safely use the provider (e.g., see [`ChannelManager::process_pending_events`] and
-/// [`ChainMonitor::process_pending_events`]).
-///
-/// (C-not implementable) As there is likely no reason for a user to implement this trait on their
-/// own type(s).
-///
-/// [`process_pending_events`]: Self::process_pending_events
-/// [`handle_event`]: EventHandler::handle_event
-/// [`ChannelManager::process_pending_events`]: crate::ln::channelmanager::ChannelManager#method.process_pending_events
-/// [`ChainMonitor::process_pending_events`]: crate::chain::chainmonitor::ChainMonitor#method.process_pending_events
-/// [`ChannelManager::process_pending_events_async`]: crate::ln::channelmanager::ChannelManager::process_pending_events_async
-/// [`ChainMonitor::process_pending_events_async`]: crate::chain::chainmonitor::ChainMonitor::process_pending_events_async
-pub trait EventsProvider {
- /// Processes any events generated since the last call using the given event handler.
- ///
- /// See the trait-level documentation for requirements.
- fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler;
-}
-
-/// A trait implemented for objects handling events from [`EventsProvider`].
-///
-/// An async variation also exists for implementations of [`EventsProvider`] that support async
-/// event handling. The async event handler should satisfy the generic bounds: `F:
-/// core::future::Future, H: Fn(Event) -> F`.
-pub trait EventHandler {
- /// Handles the given [`Event`].
- ///
- /// See [`EventsProvider`] for details that must be considered when implementing this method.
- fn handle_event(&self, event: Event);
-}
-
-impl<F> EventHandler for F where F: Fn(Event) {
- fn handle_event(&self, event: Event) {
- self(event)
- }
-}
-
-impl<T: EventHandler> EventHandler for Arc<T> {
- fn handle_event(&self, event: Event) {
- self.deref().handle_event(event)
- }
-}
/// actually backed by a [`HashMap`], with some additional tracking to ensure we can iterate over
/// keys in the order defined by [`Ord`].
///
-/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
+/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
///
/// [`BTreeMap`]: alloc::collections::BTreeMap
#[derive(Clone, Debug, Eq)]
/// An iterator over a range of values in an [`IndexedMap`]
///
-/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
+/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub struct Range<'a, K: Hash + Ord, V> {
inner_range: Iter<'a, K>,
map: &'a HashMap<K, V>,
/// An [`Entry`] for a key which currently has no value
///
-/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
+/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub struct VacantEntry<'a, K: Hash + Ord, V> {
#[cfg(feature = "hashbrown")]
underlying_entry: hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>,
/// An [`Entry`] for an existing key-value pair
///
-/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
+/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub struct OccupiedEntry<'a, K: Hash + Ord, V> {
#[cfg(feature = "hashbrown")]
underlying_entry: hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>,
/// A mutable reference to a position in the map. This can be used to reference, add, or update the
/// value at a fixed key.
///
-/// (C-not exported) as bindings provide alternate accessors rather than exposing maps directly.
+/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub enum Entry<'a, K: Hash + Ord, V> {
/// A mutable reference to a position within the map where there is no value.
Vacant(VacantEntry<'a, K, V>),
impl<'a> Record<'a> {
/// Returns a new Record.
- /// (C-not exported) as fmt can't be used in C
+ ///
+ /// This is not exported to bindings users as fmt can't be used in C
#[inline]
pub fn new(level: Level, args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32) -> Record<'a> {
Record {
}
/// Wrapper for logging a [`PublicKey`] in hex format.
-/// (C-not exported) as fmt can't be used in C
+///
+/// This is not exported to bindings users as fmt can't be used in C
#[doc(hidden)]
pub struct DebugPubKey<'a>(pub &'a PublicKey);
impl<'a> core::fmt::Display for DebugPubKey<'a> {
}
/// Wrapper for logging byte slices in hex format.
-/// (C-not exported) as fmt can't be used in C
+///
+/// This is not exported to bindings users as fmt can't be used in C
#[doc(hidden)]
pub struct DebugBytes<'a>(pub &'a [u8]);
impl<'a> core::fmt::Display for DebugBytes<'a> {
#[macro_use]
pub mod ser_macros;
-pub mod events;
pub mod errors;
pub mod ser;
pub mod message_signing;
use core::marker::Sized;
use core::time::Duration;
use crate::ln::msgs::DecodeError;
+#[cfg(taproot)]
+use crate::ln::msgs::PartialSignatureWithNonce;
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::util::byte_utils::{be48_to_array, slice_to_be48};
/// A simplified version of [`std::io::Write`] that exists largely for backwards compatibility.
/// An impl is provided for any type that also impls [`std::io::Write`].
///
-/// (C-not exported) as we only export serialization to/from byte arrays instead
+/// This is not exported to bindings users as we only export serialization to/from byte arrays instead
pub trait Writer {
/// Writes the given buf out. See std::io::Write::write_all for more
fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error>;
/// Writer that only tracks the amount of data written - useful if you need to calculate the length
/// of some data when serialized but don't yet need the full data.
///
-/// (C-not exported) as manual TLV building is not currently supported in bindings
+/// This is not exported to bindings users as manual TLV building is not currently supported in bindings
pub struct LengthCalculatingWriter(pub usize);
impl Writer for LengthCalculatingWriter {
#[inline]
/// Essentially [`std::io::Take`] but a bit simpler and with a method to walk the underlying stream
/// forward to ensure we always consume exactly the fixed length specified.
///
-/// (C-not exported) as manual TLV building is not currently supported in bindings
+/// This is not exported to bindings users as manual TLV building is not currently supported in bindings
pub struct FixedLengthReader<R: Read> {
read: R,
bytes_read: u64,
/// A [`Read`] implementation which tracks whether any bytes have been read at all. This allows us to distinguish
/// between "EOF reached before we started" and "EOF reached mid-read".
///
-/// (C-not exported) as manual TLV building is not currently supported in bindings
+/// This is not exported to bindings users as manual TLV building is not currently supported in bindings
pub struct ReadTrackingReader<R: Read> {
read: R,
/// Returns whether we have read from this reader or not yet.
/// A trait that various LDK types implement allowing them to be written out to a [`Writer`].
///
-/// (C-not exported) as we only export serialization to/from byte arrays instead
+/// This is not exported to bindings users as we only export serialization to/from byte arrays instead
pub trait Writeable {
/// Writes `self` out to the given [`Writer`].
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error>;
/// A trait that various LDK types implement allowing them to be read in from a [`Read`].
///
-/// (C-not exported) as we only export serialization to/from byte arrays instead
+/// This is not exported to bindings users as we only export serialization to/from byte arrays instead
pub trait Readable
where Self: Sized
{
/// A trait that various higher-level LDK types implement allowing them to be read in
/// from a [`Read`] given some additional set of arguments which is required to deserialize.
///
-/// (C-not exported) as we only export serialization to/from byte arrays instead
+/// This is not exported to bindings users as we only export serialization to/from byte arrays instead
pub trait ReadableArgs<P>
where Self: Sized
{
/// A trait that various LDK types implement allowing them to (maybe) be read in from a [`Read`].
///
-/// (C-not exported) as we only export serialization to/from byte arrays instead
+/// This is not exported to bindings users as we only export serialization to/from byte arrays instead
pub trait MaybeReadable
where Self: Sized
{
/// Wrapper to read a required (non-optional) TLV record.
///
-/// (C-not exported) as manual TLV building is not currently supported in bindings
+/// This is not exported to bindings users as manual TLV building is not currently supported in bindings
pub struct RequiredWrapper<T>(pub Option<T>);
impl<T: Readable> Readable for RequiredWrapper<T> {
#[inline]
/// Wrapper to read a required (non-optional) TLV record that may have been upgraded without
/// backwards compat.
///
-/// (C-not exported) as manual TLV building is not currently supported in bindings
+/// This is not exported to bindings users as manual TLV building is not currently supported in bindings
pub struct UpgradableRequired<T: MaybeReadable>(pub Option<T>);
impl<T: MaybeReadable> MaybeReadable for UpgradableRequired<T> {
#[inline]
impl_array!(32); // for channel id & hmac
impl_array!(PUBLIC_KEY_SIZE); // for PublicKey
impl_array!(64); // for ecdsa::Signature and schnorr::Signature
+impl_array!(66); // for MuSig2 nonces
impl_array!(1300); // for OnionPacket.hop_data
impl Writeable for [u16; 8] {
/// A type for variable-length values within TLV record where the length is encoded as part of the record.
/// Used to prevent encoding the length twice.
///
-/// (C-not exported) as manual TLV building is not currently supported in bindings
+/// This is not exported to bindings users as manual TLV building is not currently supported in bindings
pub struct WithoutLength<T>(pub T);
impl Writeable for WithoutLength<&String> {
}
}
+#[cfg(taproot)]
+impl Writeable for musig2::types::PublicNonce {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.serialize().write(w)
+ }
+}
+
+#[cfg(taproot)]
+impl Readable for musig2::types::PublicNonce {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let buf: [u8; PUBLIC_KEY_SIZE * 2] = Readable::read(r)?;
+ musig2::types::PublicNonce::from_slice(&buf).map_err(|_| DecodeError::InvalidValue)
+ }
+}
+
+#[cfg(taproot)]
+impl Writeable for PartialSignatureWithNonce {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.0.serialize().write(w)?;
+ self.1.write(w)
+ }
+}
+
+#[cfg(taproot)]
+impl Readable for PartialSignatureWithNonce {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let partial_signature_buf: [u8; SECRET_KEY_SIZE] = Readable::read(r)?;
+ let partial_signature = musig2::types::PartialSignature::from_slice(&partial_signature_buf).map_err(|_| DecodeError::InvalidValue)?;
+ let public_nonce: musig2::types::PublicNonce = Readable::read(r)?;
+ Ok(PartialSignatureWithNonce(partial_signature, public_nonce))
+ }
+}
+
impl Writeable for Sha256dHash {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.write_all(&self[..])
#[cfg(test)]
mod tests {
use core::convert::TryFrom;
+ use bitcoin::secp256k1::ecdsa;
use crate::util::ser::{Readable, Hostname, Writeable};
#[test]
assert_eq!(Hostname::read(&mut buf.as_slice()).unwrap().as_str(), "test");
}
+ #[test]
+ /// Taproot will likely fill legacy signature fields with all 0s.
+ /// This test ensures that doing so won't break serialization.
+ fn null_signature_codec() {
+ let buffer = vec![0u8; 64];
+ let mut cursor = crate::io::Cursor::new(buffer.clone());
+ let signature = ecdsa::Signature::read(&mut cursor).unwrap();
+ let serialization = signature.serialize_compact();
+ assert_eq!(buffer, serialization.to_vec())
+ }
+
#[test]
fn bigsize_encoding_decoding() {
let values = vec![0, 252, 253, 65535, 65536, 4294967295, 4294967296, 18446744073709551615];
let bytes = vec![
- "00",
+ "00",
"fc",
"fd00fd",
"fdffff",
"ff0000000100000000",
"ffffffffffffffffff"
];
- for i in 0..=7 {
+ for i in 0..=7 {
let mut stream = crate::io::Cursor::new(::hex::decode(bytes[i]).unwrap());
assert_eq!(super::BigSize::read(&mut stream).unwrap().0, values[i]);
let mut stream = super::VecWriter(Vec::new());
($type:ty) => { &'a $type };
}
+#[doc(hidden)]
+#[macro_export]
macro_rules! _impl_writeable_tlv_based_enum_common {
($st: ident, $(($variant_id: expr, $variant_name: ident) =>
{$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
$($st::$variant_name { $(ref $field),* } => {
let id: u8 = $variant_id;
id.write(writer)?;
- write_tlv_fields!(writer, {
+ $crate::write_tlv_fields!(writer, {
$(($type, *$field, $fieldty)),*
});
}),*
{$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
),* $(,)*;
$(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*) => {
- _impl_writeable_tlv_based_enum_common!($st,
+ $crate::_impl_writeable_tlv_based_enum_common!($st,
$(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*;
$(($tuple_variant_id, $tuple_variant_name)),*);
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
let f = || {
- _init_and_read_tlv_fields!(reader, {
+ $crate::_init_and_read_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
Ok($st::$variant_name {
$(
- $field: _init_tlv_based_struct_field!($field, $fieldty)
+ $field: $crate::_init_tlv_based_struct_field!($field, $fieldty)
),*
})
};
),* $(,)*
$(;
$(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*)*) => {
- _impl_writeable_tlv_based_enum_common!($st,
+ $crate::_impl_writeable_tlv_based_enum_common!($st,
$(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*;
$($(($tuple_variant_id, $tuple_variant_name)),*)*);
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
let f = || {
- _init_and_read_tlv_fields!(reader, {
+ $crate::_init_and_read_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
Ok(Some($st::$variant_name {
$(
- $field: _init_tlv_based_struct_field!($field, $fieldty)
+ $field: $crate::_init_tlv_based_struct_field!($field, $fieldty)
),*
}))
};
//! Utilities for strings.
+use alloc::string::String;
use core::fmt;
+use crate::io::{self, Read};
+use crate::ln::msgs;
+use crate::util::ser::{Writeable, Writer, Readable};
+
+/// Struct to `Display` fields in a safe way using `PrintableString`
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct UntrustedString(pub String);
+
+impl Writeable for UntrustedString {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.0.write(w)
+ }
+}
+
+impl Readable for UntrustedString {
+ fn read<R: Read>(r: &mut R) -> Result<Self, msgs::DecodeError> {
+ let s: String = Readable::read(r)?;
+ Ok(Self(s))
+ }
+}
+
+impl fmt::Display for UntrustedString {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ PrintableString(&self.0).fmt(f)
+ }
+}
/// A string that displays only printable characters, replacing control characters with
/// [`core::char::REPLACEMENT_CHARACTER`].
use crate::chain::channelmonitor::MonitorEvent;
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface;
+use crate::events;
use crate::ln::channelmanager;
use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use crate::ln::{msgs, wire};
use crate::routing::scoring::{ChannelUsage, Score};
use crate::util::config::UserConfig;
use crate::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
-use crate::util::events;
use crate::util::logger::{Logger, Level, Record};
use crate::util::ser::{Readable, ReadableArgs, Writer, Writeable};
/// 1. belongs to the specified module and
/// 2. contains `line` in it.
/// And asserts if the number of occurrences is the same with the given `count`
- pub fn assert_log_contains(&self, module: String, line: String, count: usize) {
+ pub fn assert_log_contains(&self, module: &str, line: &str, count: usize) {
let log_entries = self.lines.lock().unwrap();
let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
- m == &module && l.contains(line.as_str())
+ m == module && l.contains(line)
}).map(|(_, c) | { c }).sum();
assert_eq!(l, count)
}
/// 1. belong to the specified module and
/// 2. match the given regex pattern.
/// Assert that the number of occurrences equals the given `count`
- pub fn assert_log_regex(&self, module: String, pattern: regex::Regex, count: usize) {
+ pub fn assert_log_regex(&self, module: &str, pattern: regex::Regex, count: usize) {
let log_entries = self.lines.lock().unwrap();
let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
- m == &module && pattern.is_match(&l)
+ m == module && pattern.is_match(&l)
}).map(|(_, c) | { c }).sum();
assert_eq!(l, count)
}
use alloc::sync::Arc;
use core::mem;
-use crate::sync::{Condvar, Mutex, MutexGuard};
+use crate::sync::Mutex;
use crate::prelude::*;
-#[cfg(any(test, feature = "std"))]
-use std::time::{Duration, Instant};
+#[cfg(feature = "std")]
+use crate::sync::Condvar;
+#[cfg(feature = "std")]
+use std::time::Duration;
use core::future::Future as StdFuture;
use core::task::{Context, Poll};
/// Used to signal to one of many waiters that the condition they're waiting on has happened.
pub(crate) struct Notifier {
notify_pending: Mutex<(bool, Option<Arc<Mutex<FutureState>>>)>,
- condvar: Condvar,
-}
-
-macro_rules! check_woken {
- ($guard: expr, $retval: expr) => { {
- if $guard.0 {
- $guard.0 = false;
- if $guard.1.as_ref().map(|l| l.lock().unwrap().complete).unwrap_or(false) {
- // If we're about to return as woken, and the future state is marked complete, wipe
- // the future state and let the next future wait until we get a new notify.
- $guard.1.take();
- }
- return $retval;
- }
- } }
}
impl Notifier {
pub(crate) fn new() -> Self {
Self {
notify_pending: Mutex::new((false, None)),
- condvar: Condvar::new(),
- }
- }
-
- fn propagate_future_state_to_notify_flag(&self) -> MutexGuard<(bool, Option<Arc<Mutex<FutureState>>>)> {
- let mut lock = self.notify_pending.lock().unwrap();
- if let Some(existing_state) = &lock.1 {
- if existing_state.lock().unwrap().callbacks_made {
- // If the existing `FutureState` has completed and actually made callbacks,
- // consider the notification flag to have been cleared and reset the future state.
- lock.1.take();
- lock.0 = false;
- }
- }
- lock
- }
-
- pub(crate) fn wait(&self) {
- loop {
- let mut guard = self.propagate_future_state_to_notify_flag();
- check_woken!(guard, ());
- guard = self.condvar.wait(guard).unwrap();
- check_woken!(guard, ());
- }
- }
-
- #[cfg(any(test, feature = "std"))]
- pub(crate) fn wait_timeout(&self, max_wait: Duration) -> bool {
- let current_time = Instant::now();
- loop {
- let mut guard = self.propagate_future_state_to_notify_flag();
- check_woken!(guard, true);
- guard = self.condvar.wait_timeout(guard, max_wait).unwrap().0;
- check_woken!(guard, true);
- // Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
- // desired wait time has actually passed, and if not then restart the loop with a reduced wait
- // time. Note that this logic can be highly simplified through the use of
- // `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
- // 1.42.0.
- let elapsed = current_time.elapsed();
- if elapsed >= max_wait {
- return false;
- }
- match max_wait.checked_sub(elapsed) {
- None => return false,
- Some(_) => continue
- }
}
}
}
}
lock.0 = true;
- mem::drop(lock);
- self.condvar.notify_all();
}
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future {
- let mut lock = self.propagate_future_state_to_notify_flag();
+ let mut lock = self.notify_pending.lock().unwrap();
+ if let Some(existing_state) = &lock.1 {
+ if existing_state.lock().unwrap().callbacks_made {
+ // If the existing `FutureState` has completed and actually made callbacks,
+ // consider the notification flag to have been cleared and reset the future state.
+ lock.1.take();
+ lock.0 = false;
+ }
+ }
if let Some(existing_state) = &lock.1 {
Future { state: Arc::clone(&existing_state) }
} else {
}
}
+macro_rules! define_callback { ($($bounds: path),*) => {
/// A callback which is called when a [`Future`] completes.
///
/// Note that this MUST NOT call back into LDK directly, it must instead schedule actions to be
///
/// Note that the [`std::future::Future`] implementation may only work for runtimes which schedule
/// futures when they receive a wake, rather than immediately executing them.
-pub trait FutureCallback : Send {
+pub trait FutureCallback : $($bounds +)* {
/// The method which is called.
fn call(&self);
}
-impl<F: Fn() + Send> FutureCallback for F {
+impl<F: Fn() $(+ $bounds)*> FutureCallback for F {
fn call(&self) { (self)(); }
}
+} }
+
+#[cfg(feature = "std")]
+define_callback!(Send);
+#[cfg(not(feature = "std"))]
+define_callback!();
pub(crate) struct FutureState {
// When we're tracking whether a callback counts as having woken the user's code, we check the
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
+///
+/// Clones can be made and all futures cloned from the same source will complete at the same time.
+#[derive(Clone)]
pub struct Future {
state: Arc<Mutex<FutureState>>,
}
/// Registers a callback to be called upon completion of this future. If the future has already
/// completed, the callback will be called immediately.
///
- /// (C-not exported) use the bindings-only `register_callback_fn` instead
+ /// This is not exported to bindings users, use the bindings-only `register_callback_fn` instead
pub fn register_callback(&self, callback: Box<dyn FutureCallback>) {
let mut state = self.state.lock().unwrap();
if state.complete {
pub fn register_callback_fn<F: 'static + FutureCallback>(&self, callback: F) {
self.register_callback(Box::new(callback));
}
+
+ /// Waits until this [`Future`] completes.
+ #[cfg(feature = "std")]
+ pub fn wait(self) {
+ Sleeper::from_single_future(self).wait();
+ }
+
+ /// Waits until this [`Future`] completes or the given amount of time has elapsed.
+ ///
+ /// Returns true if the [`Future`] completed, false if the time elapsed.
+ #[cfg(feature = "std")]
+ pub fn wait_timeout(self, max_wait: Duration) -> bool {
+ Sleeper::from_single_future(self).wait_timeout(max_wait)
+ }
+
+ #[cfg(test)]
+ pub fn poll_is_complete(&self) -> bool {
+ let mut state = self.state.lock().unwrap();
+ if state.complete {
+ state.callbacks_made = true;
+ true
+ } else { false }
+ }
}
use core::task::Waker;
fn call(&self) { self.0.wake_by_ref() }
}
-/// (C-not exported) as Rust Futures aren't usable in language bindings.
+/// This is not exported to bindings users as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
type Output = ();
}
}
+/// A struct which can be used to select across many [`Future`]s at once without relying on a full
+/// async context.
+#[cfg(feature = "std")]
+pub struct Sleeper {
+ notifiers: Vec<Arc<Mutex<FutureState>>>,
+}
+
+#[cfg(feature = "std")]
+impl Sleeper {
+ /// Constructs a new sleeper from one future, allowing blocking on it.
+ pub fn from_single_future(future: Future) -> Self {
+ Self { notifiers: vec![future.state] }
+ }
+ /// Constructs a new sleeper from two futures, allowing blocking on both at once.
+ // Note that this is the common case - a ChannelManager and ChainMonitor.
+ pub fn from_two_futures(fut_a: Future, fut_b: Future) -> Self {
+ Self { notifiers: vec![fut_a.state, fut_b.state] }
+ }
+ /// Constructs a new sleeper on many futures, allowing blocking on all at once.
+ pub fn new(futures: Vec<Future>) -> Self {
+ Self { notifiers: futures.into_iter().map(|f| f.state).collect() }
+ }
+ /// Prepares to go into a wait loop body, creating a condition variable which we can block on
+ /// and an `Arc<Mutex<Option<_>>>` which gets set to the waking `Future`'s state prior to the
+ /// condition variable being woken.
+ fn setup_wait(&self) -> (Arc<Condvar>, Arc<Mutex<Option<Arc<Mutex<FutureState>>>>>) {
+ let cv = Arc::new(Condvar::new());
+ let notified_fut_mtx = Arc::new(Mutex::new(None));
+ {
+ for notifier_mtx in self.notifiers.iter() {
+ let cv_ref = Arc::clone(&cv);
+ let notified_fut_ref = Arc::clone(&notified_fut_mtx);
+ let notifier_ref = Arc::clone(&notifier_mtx);
+ let mut notifier = notifier_mtx.lock().unwrap();
+ if notifier.complete {
+ *notified_fut_mtx.lock().unwrap() = Some(notifier_ref);
+ break;
+ }
+ notifier.callbacks.push((false, Box::new(move || {
+ *notified_fut_ref.lock().unwrap() = Some(Arc::clone(&notifier_ref));
+ cv_ref.notify_all();
+ })));
+ }
+ }
+ (cv, notified_fut_mtx)
+ }
+
+ /// Wait until one of the [`Future`]s registered with this [`Sleeper`] has completed.
+ pub fn wait(&self) {
+ let (cv, notified_fut_mtx) = self.setup_wait();
+ let notified_fut = cv.wait_while(notified_fut_mtx.lock().unwrap(), |fut_opt| fut_opt.is_none())
+ .unwrap().take().expect("CV wait shouldn't have returned until the notifying future was set");
+ notified_fut.lock().unwrap().callbacks_made = true;
+ }
+
+ /// Wait until one of the [`Future`]s registered with this [`Sleeper`] has completed or the
+ /// given amount of time has elapsed. Returns true if a [`Future`] completed, false if the time
+ /// elapsed.
+ pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+ let (cv, notified_fut_mtx) = self.setup_wait();
+ let notified_fut =
+ match cv.wait_timeout_while(notified_fut_mtx.lock().unwrap(), max_wait, |fut_opt| fut_opt.is_none()) {
+ Ok((_, e)) if e.timed_out() => return false,
+ Ok((mut notified_fut, _)) =>
+ notified_fut.take().expect("CV wait shouldn't have returned until the notifying future was set"),
+ Err(_) => panic!("Previous panic while a lock was held led to a lock panic"),
+ };
+ notified_fut.lock().unwrap().callbacks_made = true;
+ true
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
let exit_thread_clone = exit_thread.clone();
thread::spawn(move || {
loop {
- let mut lock = thread_notifier.notify_pending.lock().unwrap();
- lock.0 = true;
- thread_notifier.condvar.notify_all();
-
+ thread_notifier.notify();
if exit_thread_clone.load(Ordering::SeqCst) {
break
}
});
// Check that we can block indefinitely until updates are available.
- let _ = persistence_notifier.wait();
+ let _ = persistence_notifier.get_future().wait();
// Check that the Notifier will return after the given duration if updates are
// available.
loop {
- if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
+ if persistence_notifier.get_future().wait_timeout(Duration::from_millis(100)) {
break
}
}
// Check that the Notifier will return after the given duration even if no updates
// are available.
loop {
- if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
+ if !persistence_notifier.get_future().wait_timeout(Duration::from_millis(100)) {
break
}
}
}
#[test]
+ #[cfg(feature = "std")]
fn test_dropped_future_doesnt_count() {
// Tests that if a Future gets drop'd before it is poll()ed `Ready` it doesn't count as
// having been woken, leaving the notify-required flag set.
// If we get a future and don't touch it we're definitely still notify-required.
notifier.get_future();
- assert!(notifier.wait_timeout(Duration::from_millis(1)));
- assert!(!notifier.wait_timeout(Duration::from_millis(1)));
+ assert!(notifier.get_future().wait_timeout(Duration::from_millis(1)));
+ assert!(!notifier.get_future().wait_timeout(Duration::from_millis(1)));
// Even if we poll'd once but didn't observe a `Ready`, we should be notify-required.
let mut future = notifier.get_future();
notifier.notify();
assert!(woken.load(Ordering::SeqCst));
- assert!(notifier.wait_timeout(Duration::from_millis(1)));
+ assert!(notifier.get_future().wait_timeout(Duration::from_millis(1)));
// However, once we do poll `Ready` it should wipe the notify-required flag.
let mut future = notifier.get_future();
notifier.notify();
assert!(woken.load(Ordering::SeqCst));
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
- assert!(!notifier.wait_timeout(Duration::from_millis(1)));
+ assert!(!notifier.get_future().wait_timeout(Duration::from_millis(1)));
}
#[test]
assert!(woken.load(Ordering::SeqCst));
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Ready(()));
}
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn test_multi_future_sleep() {
+ // Tests the `Sleeper` with multiple futures.
+ let notifier_a = Notifier::new();
+ let notifier_b = Notifier::new();
+
+ // Set both notifiers as woken without sleeping yet.
+ notifier_a.notify();
+ notifier_b.notify();
+ Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+
+ // One future has woken us up, but the other should still have a pending notification.
+ Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+
+ // However once we've slept twice, we should no longer have any pending notifications
+ assert!(!Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future())
+ .wait_timeout(Duration::from_millis(10)));
+
+ // Test ordering somewhat more.
+ notifier_a.notify();
+ Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn sleeper_with_pending_callbacks() {
+ // This is similar to the above `test_multi_future_sleep` test, but in addition registers
+ // "normal" callbacks which will cause the futures to assume notification has occurred,
+ // rather than waiting for a woken sleeper.
+ let notifier_a = Notifier::new();
+ let notifier_b = Notifier::new();
+
+ // Set both notifiers as woken without sleeping yet.
+ notifier_a.notify();
+ notifier_b.notify();
+
+ // After sleeping one future (not guaranteed which one, however) will have its notification
+ // bit cleared.
+ Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+
+ // By registering a callback on the futures for both notifiers, one will complete
+ // immediately, but one will remain tied to the notifier, and will complete once the
+ // notifier is next woken, which will be considered the completion of the notification.
+ let callback_a = Arc::new(AtomicBool::new(false));
+ let callback_b = Arc::new(AtomicBool::new(false));
+ let callback_a_ref = Arc::clone(&callback_a);
+ let callback_b_ref = Arc::clone(&callback_b);
+ notifier_a.get_future().register_callback(Box::new(move || assert!(!callback_a_ref.fetch_or(true, Ordering::SeqCst))));
+ notifier_b.get_future().register_callback(Box::new(move || assert!(!callback_b_ref.fetch_or(true, Ordering::SeqCst))));
+ assert!(callback_a.load(Ordering::SeqCst) ^ callback_b.load(Ordering::SeqCst));
+
+ // If we now notify both notifiers again, the other callback will fire, completing the
+ // notification, and we'll be back to one pending notification.
+ notifier_a.notify();
+ notifier_b.notify();
+
+ assert!(callback_a.load(Ordering::SeqCst) && callback_b.load(Ordering::SeqCst));
+ Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+ assert!(!Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future())
+ .wait_timeout(Duration::from_millis(10)));
+ }
}
--- /dev/null
+## Backwards Compatibility
+
+- Providing `ChannelMonitorUpdate`s generated by LDK 0.0.115 to a
+`ChannelMonitor` on 0.0.114 or before may panic.
--- /dev/null
+## API Updates
+
+- `Event::PaymentPathFailed::retry` will always be `None` if we initiate a payment on 0.0.115
+ then downgrade to an earlier version (#2063)