lightning_persister::fs_store::bench::bench_sends,
lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
lightning::routing::gossip::benches::read_network_graph,
- lightning::routing::gossip::benches::write_network_graph);
+ lightning::routing::gossip::benches::write_network_graph,
+ lightning::routing::scoring::benches::decay_100k_channel_bounds);
criterion_main!(benches);
pass
elif cfg == "taproot":
pass
+ elif cfg == "async_signing":
+ pass
elif cfg == "require_route_graph_test":
pass
else:
popd
fi
-echo -e "\n\nTest Taproot builds"
-pushd lightning
+echo -e "\n\nTest cfg-flag builds"
RUSTFLAGS="$RUSTFLAGS --cfg=taproot" cargo test --verbose --color always -p lightning
-popd
+RUSTFLAGS="$RUSTFLAGS --cfg=async_signing" cargo test --verbose --color always -p lightning
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{BlockHash, WPubkeyHash};
+use lightning::blinded_path::BlindedPath;
+use lightning::blinded_path::payment::ReceiveTlvs;
use lightning::chain;
use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::ln::script::ShutdownScript;
use lightning::ln::functional_test_utils::*;
-use lightning::offers::invoice::UnsignedBolt12Invoice;
+use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
use lightning::util::errors::APIError;
use lightning::util::logger::Logger;
use crate::utils::test_logger::{self, Output};
use crate::utils::test_persister::TestPersister;
-use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1};
+use bitcoin::secp256k1::{self, Message, PublicKey, SecretKey, Scalar, Secp256k1};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
use bitcoin::secp256k1::schnorr;
action: msgs::ErrorAction::IgnoreError
})
}
+
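+ // The fuzz target drives payments over hard-coded routes, so the blinded-path and
+ // onion-message routing hooks below are never exercised and simply panic if reached.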
+ fn create_blinded_payment_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+ _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+ unreachable!()
+ }
+}
+
+impl MessageRouter for FuzzRouter {
+ fn find_path(
+ &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+ ) -> Result<OnionMessagePath, ()> {
+ unreachable!()
+ }
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+ _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ unreachable!()
+ }
}
pub struct TestBroadcaster {}
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
+use lightning::blinded_path::BlindedPath;
+use lightning::blinded_path::payment::ReceiveTlvs;
use lightning::chain;
use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen};
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::ln::msgs::{self, DecodeError};
use lightning::ln::script::ShutdownScript;
use lightning::ln::functional_test_utils::*;
-use lightning::offers::invoice::UnsignedBolt12Invoice;
+use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
use crate::utils::test_logger;
use crate::utils::test_persister::TestPersister;
-use bitcoin::secp256k1::{Message, PublicKey, SecretKey, Scalar, Secp256k1};
+use bitcoin::secp256k1::{self, Message, PublicKey, SecretKey, Scalar, Secp256k1};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
use bitcoin::secp256k1::schnorr;
action: msgs::ErrorAction::IgnoreError
})
}
+
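+ // The fuzz target supplies its own routes, so these blinded-path and onion-message
+ // routing hooks are never exercised and simply panic if reached.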
+ fn create_blinded_payment_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+ _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+ unreachable!()
+ }
+}
+
+impl MessageRouter for FuzzRouter {
+ fn find_path(
+ &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+ ) -> Result<OnionMessagePath, ()> {
+ unreachable!()
+ }
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+ _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ unreachable!()
+ }
}
struct TestBroadcaster {
#[inline]
pub fn onion_hop_data_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
use lightning::util::ser::ReadableArgs;
+ use bitcoin::secp256k1::PublicKey;
let mut r = ::std::io::Cursor::new(data);
let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
- let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestNodeSigner>>::read(&mut r, &&node_signer);
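+ // InboundOnionPayload reads with an optional path blinding point (None here) alongside the signer.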
+ let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestNodeSigner)>>::read(&mut r, (None, &&node_signer));
}
#[no_mangle]
pub extern "C" fn onion_hop_data_run(data: *const u8, datalen: usize) {
use lightning::util::ser::ReadableArgs;
+ use bitcoin::secp256k1::PublicKey;
let data = unsafe { std::slice::from_raw_parts(data, datalen) };
let mut r = ::std::io::Cursor::new(data);
let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
- let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestNodeSigner>>::read(&mut r, &&node_signer);
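+ // InboundOnionPayload reads with an optional path blinding point (None here) alongside the signer.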
+ let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestNodeSigner)>>::read(&mut r, (None, &&node_signer));
}
// Imports that need to be added manually
use bitcoin::bech32::u5;
use bitcoin::blockdata::script::ScriptBuf;
-use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use bitcoin::secp256k1::schnorr;
-use lightning::sign::{Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
+use lightning::blinded_path::BlindedPath;
use lightning::ln::features::InitFeatures;
use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
use lightning::ln::script::ShutdownScript;
use lightning::offers::invoice::UnsignedBolt12Invoice;
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::sign::{Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
use lightning::util::test_channel_signer::TestChannelSigner;
use lightning::util::logger::Logger;
use lightning::util::ser::{Readable, Writeable, Writer};
first_node_addresses: None,
})
}
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+ _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ unreachable!()
+ }
}
struct TestOffersMessageHandler {}
lightning-rapid-gossip-sync = { version = "0.0.118", path = "../lightning-rapid-gossip-sync", default-features = false }
[dev-dependencies]
-tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
+tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
lightning-invoice = { version = "0.26.0", path = "../lightning-invoice" }
lightning-persister = { version = "0.0.118", path = "../lightning-persister" }
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
#[cfg(not(test))]
-const SCORER_PERSIST_TIMER: u64 = 60 * 60;
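+// The scorer is persisted (and its liquidity estimates decayed via time_passed) every five minutes.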
+const SCORER_PERSIST_TIMER: u64 = 60 * 5;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;
/// Updates scorer based on event and returns whether an update occurred so we can decide whether
/// to persist.
fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
- scorer: &'a S, event: &Event
+ scorer: &'a S, event: &Event, duration_since_epoch: Duration,
) -> bool {
match event {
Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
let mut score = scorer.write_lock();
- score.payment_path_failed(path, *scid);
+ score.payment_path_failed(path, *scid, duration_since_epoch);
},
Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
// Reached if the destination explicitly failed it back. We treat this as a successful probe
// because the payment made it all the way to the destination with sufficient liquidity.
let mut score = scorer.write_lock();
- score.probe_successful(path);
+ score.probe_successful(path, duration_since_epoch);
},
Event::PaymentPathSuccessful { path, .. } => {
let mut score = scorer.write_lock();
- score.payment_path_successful(path);
+ score.payment_path_successful(path, duration_since_epoch);
},
Event::ProbeSuccessful { path, .. } => {
let mut score = scorer.write_lock();
- score.probe_successful(path);
+ score.probe_successful(path, duration_since_epoch);
},
Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
let mut score = scorer.write_lock();
- score.probe_failed(path, *scid);
+ score.probe_failed(path, *scid, duration_since_epoch);
},
_ => return false,
}
$channel_manager: ident, $process_channel_manager_events: expr,
$peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
$logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
- $timer_elapsed: expr, $check_slow_await: expr
+ $timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr,
) => { {
log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
$channel_manager.timer_tick_occurred();
let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
let mut have_pruned = false;
+ let mut have_decayed_scorer = false;
loop {
$process_channel_manager_events;
if should_prune {
// The network graph must not be pruned while rapid sync completion is pending
if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
- #[cfg(feature = "std")] {
+ if let Some(duration_since_epoch) = $time_fetch() {
log_trace!($logger, "Pruning and persisting network graph.");
- network_graph.remove_stale_channels_and_tracking();
- }
- #[cfg(not(feature = "std"))] {
+ network_graph.remove_stale_channels_and_tracking_with_time(duration_since_epoch.as_secs());
+ } else {
log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
log_trace!($logger, "Persisting network graph.");
}
last_prune_call = $get_timer(prune_timer);
}
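+ // On startup, decay the scorer once using the current wall-clock time (when available) so
+ // that liquidity estimates recorded before the restart are aged appropriately.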
+ if !have_decayed_scorer {
+ if let Some(ref scorer) = $scorer {
+ if let Some(duration_since_epoch) = $time_fetch() {
+ log_trace!($logger, "Calling time_passed on scorer at startup");
+ scorer.write_lock().time_passed(duration_since_epoch);
+ }
+ }
+ have_decayed_scorer = true;
+ }
+
if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
if let Some(ref scorer) = $scorer {
- log_trace!($logger, "Persisting scorer");
+ if let Some(duration_since_epoch) = $time_fetch() {
+ log_trace!($logger, "Calling time_passed and persisting scorer");
+ scorer.write_lock().time_passed(duration_since_epoch);
+ } else {
+ log_trace!($logger, "Persisting scorer");
+ }
if let Err(e) = $persister.persist_scorer(&scorer) {
log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
}
/// are unsure, you should set the flag, as the performance impact of it is minimal unless there
/// are hundreds or thousands of simultaneous process calls running.
///
+/// The `fetch_time` parameter should return the current wall clock time, if one is available. If
+/// no time is available, some features may be disabled (in particular, network graph pruning and
+/// scorer time-based decay are skipped), however the node will still operate fine.
+///
/// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
/// could setup `process_events_async` like this:
/// ```
/// # use lightning::io;
/// # use std::sync::{Arc, RwLock};
/// # use std::sync::atomic::{AtomicBool, Ordering};
+/// # use std::time::SystemTime;
/// # use lightning_background_processor::{process_events_async, GossipSync};
/// # struct MyStore {}
/// # impl lightning::util::persist::KVStore for MyStore {
/// Some(background_scorer),
/// sleeper,
/// mobile_interruptable_platform,
+/// || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap())
/// )
/// .await
/// .expect("Failed to process events");
S: 'static + Deref<Target = SC> + Send + Sync,
SC: for<'b> WriteableScore<'b>,
SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
- Sleeper: Fn(Duration) -> SleepFuture
+ Sleeper: Fn(Duration) -> SleepFuture,
+ FetchTime: Fn() -> Option<Duration>,
>(
persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
- sleeper: Sleeper, mobile_interruptable_platform: bool,
+ sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime,
) -> Result<(), lightning::io::Error>
where
UL::Target: 'static + UtxoLookup,
let scorer = &scorer;
let logger = &logger;
let persister = &persister;
+ let fetch_time = &fetch_time;
async move {
if let Some(network_graph) = network_graph {
handle_network_graph_update(network_graph, &event)
}
if let Some(ref scorer) = scorer {
- if update_scorer(scorer, &event) {
- log_trace!(logger, "Persisting scorer after update");
- if let Err(e) = persister.persist_scorer(&scorer) {
- log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+ if let Some(duration_since_epoch) = fetch_time() {
+ if update_scorer(scorer, &event, duration_since_epoch) {
+ log_trace!(logger, "Persisting scorer after update");
+ if let Err(e) = persister.persist_scorer(&scorer) {
+ log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+ }
}
}
}
task::Poll::Ready(exit) => { should_break = exit; true },
task::Poll::Pending => false,
}
- }, mobile_interruptable_platform
+ }, mobile_interruptable_platform, fetch_time,
)
}
handle_network_graph_update(network_graph, &event)
}
if let Some(ref scorer) = scorer {
- if update_scorer(scorer, &event) {
+ use std::time::SystemTime;
+ let duration_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
+ .expect("Time should be sometime after 1970");
+ if update_scorer(scorer, &event, duration_since_epoch) {
log_trace!(logger, "Persisting scorer after update");
if let Err(e) = persister.persist_scorer(&scorer) {
log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
channel_manager.get_event_or_persistence_needed_future(),
chain_monitor.get_update_future()
).wait_timeout(Duration::from_millis(100)); },
- |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false
+ |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false,
+ || {
+ use std::time::SystemTime;
+ Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
+ .expect("Time should be sometime after 1970"))
+ },
)
});
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
}
impl ScoreUpdate for TestScorer {
- fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
+ fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64, _: Duration) {
if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front().unwrap() {
TestResult::PaymentFailure { path, short_channel_id } => {
}
}
- fn payment_path_successful(&mut self, actual_path: &Path) {
+ fn payment_path_successful(&mut self, actual_path: &Path, _: Duration) {
if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front().unwrap() {
TestResult::PaymentFailure { path, .. } => {
}
}
- fn probe_failed(&mut self, actual_path: &Path, _: u64) {
+ fn probe_failed(&mut self, actual_path: &Path, _: u64, _: Duration) {
if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front().unwrap() {
TestResult::PaymentFailure { path, .. } => {
}
}
}
- fn probe_successful(&mut self, actual_path: &Path) {
+ fn probe_successful(&mut self, actual_path: &Path, _: Duration) {
if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front().unwrap() {
TestResult::PaymentFailure { path, .. } => {
}
}
}
+ fn time_passed(&mut self, _: Duration) {}
}
#[cfg(c_bindings)]
tokio::time::sleep(dur).await;
false // Never exit
})
- }, false,
+ }, false, || Some(Duration::ZERO),
);
match bp_future.await {
Ok(_) => panic!("Expected error persisting manager"),
loop {
let log_entries = nodes[0].logger.lines.lock().unwrap();
- let expected_log = "Persisting scorer".to_string();
+ let expected_log = "Calling time_passed and persisting scorer".to_string();
if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
break
}
_ = exit_receiver.changed() => true,
}
})
- }, false,
+ }, false, || Some(Duration::from_secs(1696300000)),
);
let t1 = tokio::spawn(bp_future);
_ = exit_receiver.changed() => true,
}
})
- }, false,
+ }, false, || Some(Duration::ZERO),
);
let t1 = tokio::spawn(bp_future);
let t2 = tokio::spawn(async move {
bitcoin = "0.30.2"
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "net", "time", "rt" ], optional = true }
+tokio = { version = "1.35", features = [ "io-util", "net", "time", "rt" ], optional = true }
serde_json = { version = "1.0", optional = true }
chunked_transfer = { version = "1.4", optional = true }
[dev-dependencies]
lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
-tokio = { version = "1.14", features = [ "macros", "rt" ] }
+tokio = { version = "1.35", features = [ "macros", "rt" ] }
impl TryInto<Txid> for JsonResponse {
type Error = std::io::Error;
fn try_into(self) -> std::io::Result<Txid> {
- match self.0.as_str() {
- None => Err(std::io::Error::new(
- std::io::ErrorKind::InvalidData,
- "expected JSON string",
- )),
- Some(hex_data) => match Vec::<u8>::from_hex(hex_data) {
- Err(_) => Err(std::io::Error::new(
- std::io::ErrorKind::InvalidData,
- "invalid hex data",
- )),
- Ok(txid_data) => match encode::deserialize(&txid_data) {
- Err(_) => Err(std::io::Error::new(
- std::io::ErrorKind::InvalidData,
- "invalid txid",
- )),
- Ok(txid) => Ok(txid),
- },
- },
- }
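+ // Interpret the JSON value as a hex string and parse it directly into a Txid; hex and
+ // length errors surface as InvalidData.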
+ let hex_data = self.0.as_str()
+ .ok_or(Self::Error::new(std::io::ErrorKind::InvalidData, "expected JSON string"))?;
+ Txid::from_str(hex_data)
+ .map_err(|err| Self::Error::new(std::io::ErrorKind::InvalidData, err.to_string()))
}
}
match TryInto::<Txid>::try_into(response) {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
- assert_eq!(e.get_ref().unwrap().to_string(), "invalid hex data");
+ assert_eq!(e.get_ref().unwrap().to_string(), "bad hex string length 6 (expected 64)");
}
Ok(_) => panic!("Expected error"),
}
match TryInto::<Txid>::try_into(response) {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
- assert_eq!(e.get_ref().unwrap().to_string(), "invalid txid");
+ assert_eq!(e.get_ref().unwrap().to_string(), "bad hex string length 4 (expected 64)");
}
Ok(_) => panic!("Expected error"),
}
}
}
+ #[test]
+ fn into_txid_from_bitcoind_rpc_json_response() {
+ let mut rpc_response = serde_json::json!(
+ {"error": "", "id": "770", "result": "7934f775149929a8b742487129a7c3a535dfb612f0b726cc67bc10bc2628f906"}
+ );
+ let r: std::io::Result<Txid> = JsonResponse(rpc_response.get_mut("result").unwrap().take())
+ .try_into();
+ assert_eq!(
+ r.unwrap().to_string(),
+ "7934f775149929a8b742487129a7c3a535dfb612f0b726cc67bc10bc2628f906"
+ );
+ }
+
// TryInto<Transaction> can be used in two ways, first with plain hex response where data is
// the hex encoded transaction (e.g. as a result of getrawtransaction) or as a JSON object
// where the hex encoded transaction can be found in the hex field of the object (if present)
[dependencies]
bitcoin = "0.30.2"
lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
+tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] }
[dev-dependencies]
-tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
[dev-dependencies]
lightning = { version = "0.0.118", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
-tokio = { version = "1.14.0", features = ["full"] }
+tokio = { version = "1.35.0", features = ["full"] }
[target.'cfg(not(no_download))'.dev-dependencies]
electrsd = { version = "0.26.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
# Override signing to not include randomness when generating signatures for test vectors.
_test_vectors = []
-no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc"]
+no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc", "libm"]
std = ["bitcoin/std"]
# Generates low-r bitcoin signatures, which saves 1 byte in 50% of the cases
backtrace = { version = "0.3", optional = true }
core2 = { version = "0.3.0", optional = true, default-features = false }
+libm = { version = "0.2", optional = true, default-features = false }
[dev-dependencies]
regex = "1.5.6"
///
/// [`ForwardTlvs`]: crate::blinded_path::payment::ForwardTlvs
// TODO: make all payloads the same size with padding + add dummy hops
- pub(crate) fn new_for_payment<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
+ pub fn new_for_payment<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
intermediate_nodes: &[payment::ForwardNode], payee_node_id: PublicKey,
payee_tlvs: payment::ReceiveTlvs, htlc_maximum_msat: u64, entropy_source: &ES,
secp_ctx: &Secp256k1<T>
use crate::blinded_path::utils;
use crate::io;
use crate::ln::PaymentSecret;
+use crate::ln::channelmanager::CounterpartyForwardingInfo;
use crate::ln::features::BlindedHopFeatures;
use crate::ln::msgs::DecodeError;
use crate::offers::invoice::BlindedPayInfo;
pub htlc_minimum_msat: u64,
}
+impl From<CounterpartyForwardingInfo> for PaymentRelay {
+ fn from(info: CounterpartyForwardingInfo) -> Self {
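+ // Exhaustively destructure so that adding a field to CounterpartyForwardingInfo forces this
+ // conversion to be revisited.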
+ let CounterpartyForwardingInfo {
+ fee_base_msat, fee_proportional_millionths, cltv_expiry_delta
+ } = info;
+ Self { cltv_expiry_delta, fee_proportional_millionths, fee_base_msat }
+ }
+}
+
impl Writeable for ForwardTlvs {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
encode_tlv_stream!(w, {
}
}
- fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &WithChannelMonitor<L>)
- where B::Target: BroadcasterInterface,
- L::Target: Logger,
- {
- let commit_txs = self.get_latest_holder_commitment_txn(logger);
- let mut txs = vec![];
- for tx in commit_txs.iter() {
- log_info!(logger, "Broadcasting local {}", log_tx!(tx));
- txs.push(tx);
- }
- broadcaster.broadcast_transactions(&txs);
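+ /// Builds the claimable outpoints and watch outputs needed to force-close: a claim on the
+ /// funding output via our holder commitment plus, for non-anchor channels, claims on that
+ /// commitment's own outputs. Also sets `holder_tx_signed` and queues a HolderForceClosed event.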
+ fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+ let funding_outp = HolderFundingOutput::build(
+ self.funding_redeemscript.clone(),
+ self.channel_value_satoshis,
+ self.onchain_tx_handler.channel_type_features().clone()
+ );
+ let commitment_package = PackageTemplate::build_package(
+ self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
+ PackageSolvingData::HolderFundingOutput(funding_outp),
+ self.best_block.height(), self.best_block.height()
+ );
+ let mut claimable_outpoints = vec![commitment_package];
self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+ // Although we aren't signing the transaction directly here, the transaction will be signed
+ // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
+ // new channel updates.
+ self.holder_tx_signed = true;
+ let mut watch_outputs = Vec::new();
+ // We can't broadcast our HTLC transactions while the commitment transaction is
+ // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
+ // `transactions_confirmed`.
+ if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
+ // Because we're broadcasting a commitment transaction, we should construct the package
+ // assuming it gets confirmed in the next block. Sadly, we have code which considers
+ // "not yet confirmed" things as discardable, so we cannot do that here.
+ let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
+ &self.current_holder_commitment_tx, self.best_block.height()
+ );
+ let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
+ let new_outputs = self.get_broadcasted_holder_watch_outputs(
+ &self.current_holder_commitment_tx, &unsigned_commitment_tx
+ );
+ if !new_outputs.is_empty() {
+ watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
+ }
+ claimable_outpoints.append(&mut new_outpoints);
+ }
+ (claimable_outpoints, watch_outputs)
+ }
+
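+ /// Queues the holder commitment for broadcast through the `OnchainTxHandler` claim pipeline
+ /// (which signs it and handles rebroadcast) rather than broadcasting the cached transaction
+ /// directly.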
+ pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
+ &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>
+ )
+ where
+ B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+ self.onchain_tx_handler.update_claims_view_from_requests(
+ claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
+ fee_estimator, logger
+ );
}
fn update_monitor<B: Deref, F: Deref, L: Deref>(
log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
continue;
}
- self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
- // If the channel supports anchor outputs, we'll need to emit an external
- // event to be consumed such that a child transaction is broadcast with a
- // high enough feerate for the parent commitment transaction to confirm.
- if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- let funding_output = HolderFundingOutput::build(
- self.funding_redeemscript.clone(), self.channel_value_satoshis,
- self.onchain_tx_handler.channel_type_features().clone(),
- );
- let best_block_height = self.best_block.height();
- let commitment_package = PackageTemplate::build_package(
- self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
- PackageSolvingData::HolderFundingOutput(funding_output),
- best_block_height, best_block_height
- );
- self.onchain_tx_handler.update_claims_view_from_requests(
- vec![commitment_package], best_block_height, best_block_height,
- broadcaster, &bounded_fee_estimator, logger,
- );
- }
+ self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
}
}
+ /// Cancels any existing pending claims for a commitment that previously confirmed and has now
+ /// been replaced by another.
+ pub fn cancel_prev_commitment_claims<L: Deref>(
+ &mut self, logger: &L, confirmed_commitment_txid: &Txid
+ ) where L::Target: Logger {
+ for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain {
+ // Cancel any pending claims for counterparty commitments we've seen confirm.
+ if counterparty_commitment_txid == confirmed_commitment_txid {
+ continue;
+ }
+ log_trace!(logger, "Canceling claims for previously confirmed counterparty commitment {}",
+ counterparty_commitment_txid);
+ let mut outpoint = BitcoinOutPoint { txid: *counterparty_commitment_txid, vout: 0 };
+ for (htlc, _) in self.counterparty_claimable_outpoints.get(counterparty_commitment_txid).unwrap_or(&vec![]) {
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
+ if self.holder_tx_signed {
+ // If we've signed, we may have broadcast either commitment (prev or current), and
+ // attempted to claim from it immediately without waiting for a confirmation.
+ if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid {
+ log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+ self.current_holder_commitment_tx.txid);
+ let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 };
+ for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs {
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
+ if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx {
+ if prev_holder_commitment_tx.txid != *confirmed_commitment_txid {
+ log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+ prev_holder_commitment_tx.txid);
+ let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 };
+ for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs {
+ if let Some(vout) = htlc.transaction_output_index {
+ outpoint.vout = vout;
+ self.onchain_tx_handler.abandon_claim(&outpoint);
+ }
+ }
+ }
+ }
+ }
+ // If we haven't signed a holder commitment, there are no broadcast holder claims to cancel.
+ }
+
fn get_latest_holder_commitment_txn<L: Deref>(
&mut self, logger: &WithChannelMonitor<L>,
) -> Vec<Transaction> where L::Target: Logger {
commitment_tx_to_counterparty_output,
},
});
+ // Now that we've detected a confirmed commitment transaction, attempt to cancel
+ // pending claims for any commitments that were previously confirmed such that
+ // we don't continue claiming inputs that no longer exist.
+ self.cancel_prev_commitment_claims(&logger, &txid);
}
}
if tx.input.len() >= 1 {
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
- let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
- let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
- claimable_outpoints.push(commitment_package);
- self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
- // Although we aren't signing the transaction directly here, the transaction will be signed
- // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
- // new channel updates.
- self.holder_tx_signed = true;
- // We can't broadcast our HTLC transactions while the commitment transaction is
- // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
- // `transactions_confirmed`.
- if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- // Because we're broadcasting a commitment transaction, we should construct the package
- // assuming it gets confirmed in the next block. Sadly, we have code which considers
- // "not yet confirmed" things as discardable, so we cannot do that here.
- let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
- let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
- let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
- if !new_outputs.is_empty() {
- watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
- }
- claimable_outpoints.append(&mut new_outpoints);
- }
+ let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+ claimable_outpoints.append(&mut new_outpoints);
+ watch_outputs.append(&mut new_outputs);
}
// Find which on-chain events have reached their confirmation threshold.
None
}
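+ /// Abandons any claim covering the given outpoint: it is removed from the pending claim
+ /// requests and the claimable set, or dropped from the locktimed packages if not yet requested.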
+ pub fn abandon_claim(&mut self, outpoint: &BitcoinOutPoint) {
+ let claim_id = self.claimable_outpoints.get(outpoint).map(|(claim_id, _)| *claim_id)
+ .or_else(|| {
+ self.pending_claim_requests.iter()
+ .find(|(_, claim)| claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint))
+ .map(|(claim_id, _)| *claim_id)
+ });
+ if let Some(claim_id) = claim_id {
+ if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
+ for outpoint in claim.outpoints() {
+ self.claimable_outpoints.remove(outpoint);
+ }
+ }
+ } else {
+ self.locktimed_packages.values_mut().for_each(|claims|
+ claims.retain(|claim| !claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint)));
+ }
+ }
+
/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
/// Together with `update_claims_view_from_matched_txn` this used to be named
#[cfg(any(test, feature = "_test_utils"))] extern crate regex;
#[cfg(not(feature = "std"))] extern crate core2;
+#[cfg(not(feature = "std"))] extern crate libm;
#[cfg(ldk_bench)] extern crate criterion;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use crate::blinded_path::BlindedPath;
use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
-use crate::events::{HTLCDestination, MessageSendEventsProvider};
+use crate::events::{HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::PaymentSecret;
use crate::ln::channelmanager;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::outbound_payment::Retry;
use crate::prelude::*;
-use crate::routing::router::{PaymentParameters, RouteParameters};
+use crate::routing::router::{Payee, PaymentParameters, RouteParameters};
use crate::util::config::UserConfig;
use crate::util::test_utils;
let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0];
- // Ensure the final hop does not correctly blind their error.
+ // Check that the final node encodes its failure correctly.
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+
+ // Modify it so that the final hop does not correctly blind its error, letting us ensure the
+ // intro node converts it to the correct error.
update_malformed.sha256_of_onion = [1; 32];
nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false);
#[test]
fn blinded_intercept_payment() {
+ do_blinded_intercept_payment(true);
+ do_blinded_intercept_payment(false);
+}
+fn do_blinded_intercept_payment(intercept_node_fails: bool) {
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let mut intercept_forwards_config = test_default_channel_config();
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
- let chan_upd = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+ let (channel_id, chan_upd) = {
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+ (chan.2, chan.0.contents)
+ };
let amt_msat = 5000;
- let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
let intercept_scid = nodes[1].node.get_intercept_scid();
let mut intercept_chan_upd = chan_upd;
intercept_chan_upd.short_channel_id = intercept_scid;
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
- let intercept_id = match events[0] {
+ let (intercept_id, expected_outbound_amount_msat) = match events[0] {
crate::events::Event::HTLCIntercepted {
intercept_id, payment_hash: pmt_hash,
- requested_next_hop_scid: short_channel_id, ..
+ requested_next_hop_scid: short_channel_id, expected_outbound_amount_msat, ..
} => {
assert_eq!(pmt_hash, payment_hash);
assert_eq!(short_channel_id, intercept_scid);
- intercept_id
+ (intercept_id, expected_outbound_amount_msat)
},
_ => panic!()
};
- nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
- expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]);
- nodes[1].node.process_pending_htlc_forwards();
- let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ if intercept_node_fails {
+ nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]);
+ nodes[1].node.process_pending_htlc_forwards();
+ let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(&nodes[1], 1);
+ assert!(update_fail.update_fail_htlcs.len() == 1);
+ let fail_msg = update_fail.update_fail_htlcs[0].clone();
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
+ commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false);
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+ PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+ return
+ }
+
+ nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap();
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let payment_event = {
+ {
+ let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ added_monitors.clear();
+ }
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+
+ expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
+ do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[1], &nodes[2])[..]), false, payment_preimage);
+ expect_payment_sent(&nodes[0], payment_preimage, Some(Some(1000)), true, true);
+}
+
+#[test]
+fn two_hop_blinded_path_success() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+ let amt_msat = 5000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+ &chanmon_cfgs[2].keys_manager);
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+ pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt_msat, payment_hash, payment_secret);
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+}
+
+#[derive(PartialEq)]
+enum ReceiveCheckFail {
+ // The recipient fails the payment upon `PaymentClaimable`.
+ RecipientFail,
+ // Failure to decode the recipient's onion payload.
+ OnionDecodeFail,
+ // The incoming HTLC did not satisfy our requirements; in this case it underpaid us according to
+ // the expected receive amount in the onion.
+ ReceiveRequirements,
+ // The incoming HTLC errors when added to the Channel, in this case due to the HTLC being
+ // delivered out-of-order with a shutdown message.
+ ChannelCheck,
+ // The HTLC is successfully added to the inbound channel but fails receive checks in
+ // process_pending_htlc_forwards.
+ ProcessPendingHTLCsCheck,
+ // The HTLC violates the `PaymentConstraints` contained within the receiver's encrypted payload.
+ PaymentConstraints,
+}
+
+#[test]
+fn multi_hop_receiver_fail() {
+ do_multi_hop_receiver_fail(ReceiveCheckFail::RecipientFail);
+ do_multi_hop_receiver_fail(ReceiveCheckFail::OnionDecodeFail);
+ do_multi_hop_receiver_fail(ReceiveCheckFail::ReceiveRequirements);
+ do_multi_hop_receiver_fail(ReceiveCheckFail::ChannelCheck);
+ do_multi_hop_receiver_fail(ReceiveCheckFail::ProcessPendingHTLCsCheck);
+ do_multi_hop_receiver_fail(ReceiveCheckFail::PaymentConstraints);
+}
+
+fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) {
+ // Test that the receiver to a multihop blinded path fails back correctly.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ // We need the session priv to construct an invalid onion packet later.
+ let session_priv = [3; 32];
+ *nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some(session_priv);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let (chan_upd_1_2, chan_id_1_2) = {
+ let (chan_upd, _, channel_id, ..) = create_announced_chan_between_nodes_with_value(
+ &nodes, 1, 2, 1_000_000, 0
+ );
+ (chan_upd.contents, channel_id)
+ };
+
+ let amt_msat = 5000;
+ let final_cltv_delta = if check == ReceiveCheckFail::ProcessPendingHTLCsCheck {
+ // Set the final CLTV expiry too low to trigger the failure in process_pending_htlc_forwards.
+ Some(TEST_FINAL_CLTV as u16 - 2)
+ } else { None };
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), final_cltv_delta);
+ let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+ &chanmon_cfgs[2].keys_manager);
+
+ let route = if check == ReceiveCheckFail::ProcessPendingHTLCsCheck {
+ let mut route = get_route(&nodes[0], &route_params).unwrap();
+ // Set the final CLTV expiry too low to trigger the failure in process_pending_htlc_forwards.
+ route.paths[0].blinded_tail.as_mut().map(|bt| bt.excess_final_cltv_expiry_delta = TEST_FINAL_CLTV - 2);
+ route
+ } else if check == ReceiveCheckFail::PaymentConstraints {
+ // Create a blinded path where the receiver's encrypted payload has an htlc_minimum_msat that is
+ // violated by `amt_msat`, and stick it in the route_params without changing the corresponding
+ // BlindedPayInfo (to ensure pathfinding still succeeds).
+ let high_htlc_min_bp = {
+ let mut high_htlc_minimum_upd = chan_upd_1_2.clone();
+ high_htlc_minimum_upd.htlc_minimum_msat = amt_msat + 1000;
+ let high_htlc_min_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&high_htlc_minimum_upd],
+ &chanmon_cfgs[2].keys_manager);
+ if let Payee::Blinded { route_hints, .. } = high_htlc_min_params.payment_params.payee {
+ route_hints[0].1.clone()
+ } else { panic!() }
+ };
+ if let Payee::Blinded { ref mut route_hints, .. } = route_params.payment_params.payee {
+ route_hints[0].1 = high_htlc_min_bp;
+ } else { panic!() }
+ find_route(&nodes[0], &route_params).unwrap()
+ } else {
+ find_route(&nodes[0], &route_params).unwrap()
+ };
+ node_cfgs[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+
+ let mut payment_event_0_1 = {
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ SendEvent::from_event(ev)
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(&nodes[1], 1);
- assert!(update_fail.update_fail_htlcs.len() == 1);
- let fail_msg = update_fail.update_fail_htlcs[0].clone();
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
- commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false);
+
+ let mut payment_event_1_2 = {
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+ SendEvent::from_event(ev)
+ };
+
+ match check {
+ ReceiveCheckFail::RecipientFail => {
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_payment_claimable(
+ &nodes[2].node.get_and_clear_pending_events()[0], payment_hash, payment_secret, amt_msat,
+ None, nodes[2].node.get_our_node_id()
+ );
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ expect_pending_htlcs_forwardable_conditions(
+ nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]
+ );
+ nodes[2].node.process_pending_htlc_forwards();
+ check_added_monitors!(nodes[2], 1);
+ },
+ ReceiveCheckFail::OnionDecodeFail => {
+ let session_priv = SecretKey::from_slice(&session_priv).unwrap();
+ let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+ let cur_height = nodes[0].best_block_info().1;
+ let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
+ &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
+
+ let update_add = &mut payment_event_1_2.msgs[0];
+ onion_payloads.last_mut().map(|p| {
+ if let msgs::OutboundOnionPayload::BlindedReceive { ref mut intro_node_blinding_point, .. } = p {
+ // The receiver should error if both the update_add blinding_point and the
+ // intro_node_blinding_point are set.
+ assert!(intro_node_blinding_point.is_none() && update_add.blinding_point.is_some());
+ *intro_node_blinding_point = Some(PublicKey::from_slice(&[2; 33]).unwrap());
+ } else { panic!() }
+ });
+ update_add.onion_routing_packet = onion_utils::construct_onion_packet(
+ vec![onion_payloads.pop().unwrap()], vec![onion_keys.pop().unwrap()], [0; 32],
+ &payment_hash
+ ).unwrap();
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+ },
+ ReceiveCheckFail::ReceiveRequirements => {
+ let update_add = &mut payment_event_1_2.msgs[0];
+ update_add.amount_msat -= 1;
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update_add);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+ },
+ ReceiveCheckFail::ChannelCheck => {
+ nodes[2].node.close_channel(&chan_id_1_2, &nodes[1].node.get_our_node_id()).unwrap();
+ let node_2_shutdown = get_event_msg!(nodes[2], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[2].node.get_our_node_id(), &node_2_shutdown);
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg);
+ check_added_monitors!(nodes[2], 1);
+
+ nodes[2].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+ commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false);
+ },
+ ReceiveCheckFail::ProcessPendingHTLCsCheck => {
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[2],
+ vec![HTLCDestination::FailedPayment { payment_hash }]);
+ check_added_monitors!(nodes[2], 1);
+ },
+ ReceiveCheckFail::PaymentConstraints => {
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
+ }
+ }
+
+ let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1);
+ let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false);
+
+ let updates_1_0 = if check == ReceiveCheckFail::ChannelCheck {
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ events.into_iter().find_map(|ev| {
+ match ev {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+ assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ return Some(updates)
+ },
+ MessageSendEvent::SendClosingSigned { .. } => None,
+ _ => panic!()
+ }
+ }).unwrap()
+ } else { get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()) };
+ assert_eq!(updates_1_0.update_fail_htlcs.len(), 1);
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false);
expect_payment_failed_conditions(&nodes[0], payment_hash, false,
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
}
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
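+ // Fails the HTLC with an update_fail_malformed_htlc message; used to fail back blinded
+ // HTLCs where we are not the intro node.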
+ FailMalformedHTLC {
+ htlc_id: u64,
+ failure_code: u16,
+ sha256_of_onion: [u8; 32],
+ },
}
macro_rules! define_state_flags {
.ok();
if funding_signed.is_none() {
- log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
- self.signer_pending_funding = true;
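+ // If the signature isn't available: without the async_signing cfg this is a hard failure,
+ // while with it we record that the signer still owes us a funding_signed and retry later.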
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for funding_signed");
+ }
+ #[cfg(async_signing)] {
+ log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+ self.signer_pending_funding = true;
+ }
} else if self.signer_pending_funding {
log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
self.signer_pending_funding = false;
feerate: u32,
}
+/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
+/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
+trait FailHTLCContents {
+ type Message: FailHTLCMessageName;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
+ fn to_inbound_htlc_state(self) -> InboundHTLCState;
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
+}
+impl FailHTLCContents for msgs::OnionErrorPacket {
+ type Message = msgs::UpdateFailHTLC;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+ msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
+ }
+ fn to_inbound_htlc_state(self) -> InboundHTLCState {
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
+ }
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+ HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
+ }
+}
+// (failure_code, sha256_of_onion)
+impl FailHTLCContents for (u16, [u8; 32]) {
+ type Message = msgs::UpdateFailMalformedHTLC;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+ msgs::UpdateFailMalformedHTLC {
+ htlc_id,
+ channel_id,
+ failure_code: self.0,
+ sha256_of_onion: self.1
+ }
+ }
+ fn to_inbound_htlc_state(self) -> InboundHTLCState {
+ InboundHTLCState::LocalRemoved(
+ InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
+ )
+ }
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+ HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id,
+ failure_code: self.0,
+ sha256_of_onion: self.1
+ }
+ }
+}
+
+trait FailHTLCMessageName {
+ fn name() -> &'static str;
+}
+impl FailHTLCMessageName for msgs::UpdateFailHTLC {
+ fn name() -> &'static str {
+ "update_fail_htlc"
+ }
+}
+impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
+ fn name() -> &'static str {
+ "update_fail_malformed_htlc"
+ }
+}
+
impl<SP: Deref> Channel<SP> where
SP::Target: SignerProvider,
<SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
return UpdateFulfillFetch::DuplicateClaim {};
}
},
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+ {
if htlc_id_arg == htlc_id {
log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
// TODO: We may actually be able to switch to a fulfill here, though its
.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
}
+ /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
+ /// want to fail blinded HTLCs where we are not the intro node.
+ ///
+ /// See [`Self::queue_fail_htlc`] for more info.
+ pub fn queue_fail_malformed_htlc<L: Deref>(
+ &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
+ ) -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
+
/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
/// before failing backwards. If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`.
/// Thus, this will always
/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
/// [`ChannelError::Ignore`].
- fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
+ &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
+ logger: &L
+ ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
panic!("Was asked to fail an HTLC when channel was not in an operational state");
}
return Ok(None);
}
},
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+ {
if htlc_id_arg == htlc_id {
debug_assert!(false, "Tried to fail an HTLC that was already failed");
return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
}
}
log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
- htlc_id: htlc_id_arg,
- err_packet,
- });
+ self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
return Ok(None);
}
- log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
+ log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
+ E::Message::name(), &self.context.channel_id());
{
let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+ htlc.state = err_packet.clone().to_inbound_htlc_state();
}
- Ok(Some(msgs::UpdateFailHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc_id_arg,
- reason: err_packet
- }))
+ Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
}
// Message handlers:
}
}
},
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
+ Ok(update_fail_malformed_opt) => {
+ debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
+ update_fail_count += 1;
+ },
+ Err(e) => {
+ if let ChannelError::Ignore(_) = e {}
+ else {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ }
+ }
+ }
+ },
}
}
if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
- #[allow(unused)]
+ #[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
let commitment_update = if self.context.signer_pending_commitment_update {
self.get_last_commitment_update_for_send(logger).ok()
}
update
} else {
- if !self.context.signer_pending_commitment_update {
- log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
- self.context.signer_pending_commitment_update = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for new commitment state");
+ }
+ #[cfg(async_signing)] {
+ if !self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = true;
+ }
+ return Err(());
}
- return Err(());
};
Ok(msgs::CommitmentUpdate {
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
let funding_created = self.get_funding_created_msg(logger);
if funding_created.is_none() {
- if !self.context.signer_pending_funding {
- log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
- self.context.signer_pending_funding = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for new funding creation");
+ }
+ #[cfg(async_signing)] {
+ if !self.context.signer_pending_funding {
+ log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+ self.context.signer_pending_funding = true;
+ }
}
}
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
- #[allow(unused)]
+ #[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
if self.context.signer_pending_funding && self.context.is_outbound() {
log_trace!(logger, "Signer unblocked a funding_created");
let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
+ // Vec of (htlc_id, failure_code, sha256_of_onion)
+ let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
for update in self.context.holding_cell_htlc_updates.iter() {
match update {
htlc_id.write(writer)?;
err_packet.write(writer)?;
}
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id, failure_code, sha256_of_onion
+ } => {
+ // We don't want to break downgrading by adding a new variant, so write a dummy
+ // `::FailHTLC` variant and write the real malformed error as an optional TLV.
+ malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
+
+ let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+ 2u8.write(writer)?;
+ htlc_id.write(writer)?;
+ dummy_err_packet.write(writer)?;
+ }
}
}
(38, self.context.is_batch_funding, option),
(39, pending_outbound_blinding_points, optional_vec),
(41, holding_cell_blinding_points, optional_vec),
+ (43, malformed_htlcs, optional_vec), // Added in 0.0.119
});
Ok(())
let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+ let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(38, is_batch_funding, option),
(39, pending_outbound_blinding_points_opt, optional_vec),
(41, holding_cell_blinding_points_opt, optional_vec),
+ (43, malformed_htlcs, optional_vec), // Added in 0.0.119
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
+ if let Some(malformed_htlcs) = malformed_htlcs {
+ for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
+ let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
+ if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
+ let matches = *htlc_id == malformed_htlc_id;
+ if matches { debug_assert!(err_packet.data.is_empty()) }
+ matches
+ } else { false }
+ }).ok_or(DecodeError::InvalidValue)?;
+ let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
+ };
+ let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
+ }
+ }
+
Ok(Channel {
context: ChannelContext {
user_id,
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
+ use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::{PaymentHash, PaymentPreimage};
use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
}
#[test]
- fn blinding_point_skimmed_fee_ser() {
- // Ensure that channel blinding points and skimmed fees are (de)serialized properly.
+ fn blinding_point_skimmed_fee_malformed_ser() {
+ // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
+ // properly.
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
payment_preimage: PaymentPreimage([42; 32]),
htlc_id: 0,
};
- let mut holding_cell_htlc_updates = Vec::with_capacity(10);
- for i in 0..10 {
- if i % 3 == 0 {
+ let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
+ htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
+ };
+ let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
+ };
+ let mut holding_cell_htlc_updates = Vec::with_capacity(12);
+ for i in 0..12 {
+ if i % 5 == 0 {
holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
- } else if i % 3 == 1 {
+ } else if i % 5 == 1 {
holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
- } else {
+ } else if i % 5 == 2 {
let mut dummy_add = dummy_holding_cell_add_htlc.clone();
if let HTLCUpdateAwaitingACK::AddHTLC {
ref mut blinding_point, ref mut skimmed_fee_msat, ..
*skimmed_fee_msat = Some(42);
} else { panic!() }
holding_cell_htlc_updates.push(dummy_add);
+ } else if i % 5 == 3 {
+ holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
+ } else {
+ holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
}
}
chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
/// An HTLC which should be forwarded on to another node.
Forward {
/// [`Event::PaymentClaimable::onion_fields`] as
/// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
+ /// Set if this HTLC is the final hop in a multi-hop blinded path.
+ requires_blinded_error: bool,
},
/// The onion indicates that this is for payment to us but which contains the preimage for
/// claiming included, and is unrelated to any invoice we'd previously generated (aka a
}
/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
-#[derive(Clone, Copy, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
/// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
/// onion payload if we're the introduction node. Useful for calculating the next hop's
impl PendingHTLCRouting {
// Used to override the onion failure code and data if the HTLC is blinded.
fn blinded_failure(&self) -> Option<BlindedFailure> {
- // TODO: needs update when we support receiving to multi-hop blinded paths
- if let Self::Forward { blinded: Some(_), .. } = self {
- Some(BlindedFailure::FromIntroductionNode)
- } else {
- None
+ // TODO: needs update when we support forwarding blinded HTLCs as non-intro node
+ match self {
+ Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode),
+ Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+ _ => None,
}
}
}
/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
/// should go next.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+#[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
/// Further routing details based on whether the HTLC is being forwarded or received.
pub routing: PendingHTLCRouting,
Fail(HTLCFailureMsg),
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
pub(super) forward_info: PendingHTLCInfo,
prev_user_channel_id: u128,
}
+#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
AddHTLC(PendingAddHTLCInfo),
FailHTLC {
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
+ FailMalformedHTLC {
+ htlc_id: u64,
+ failure_code: u16,
+ sha256_of_onion: [u8; 32],
+ },
}
// Used for failing blinded HTLCs backwards correctly.
-#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
enum BlindedFailure {
FromIntroductionNode,
- // Another variant will be added here for non-intro nodes.
+ FromBlindedNode,
}
/// Tracks the inbound corresponding to an outbound HTLC
msg, &self.node_signer, &self.logger, &self.secp_ctx
)?;
- let is_blinded = match next_hop {
+ let is_intro_node_forward = match next_hop {
onion_utils::Hop::Forward {
+ // TODO: update this when we support blinded forwarding as non-intro node
next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
} => true,
- _ => false, // TODO: update this when we support receiving to multi-hop blinded paths
+ _ => false,
};
macro_rules! return_err {
WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
"Failed to accept/forward incoming HTLC: {}", $msg
);
- let (err_code, err_data) = if is_blinded {
+ // If `msg.blinding_point` is set, we must always fail with malformed.
+ if msg.blinding_point.is_some() {
+ return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }));
+ }
+
+ let (err_code, err_data) = if is_intro_node_forward {
(INVALID_ONION_BLINDING, &[0; 32][..])
} else { ($err_code, $data) };
return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
{
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ if msg.blinding_point.is_some() {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ }
return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
let next_hop = match onion_utils::decode_next_payment_hop(
phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
- payment_hash, &self.node_signer
+ payment_hash, None, &self.node_signer
) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
}
},
- HTLCForwardInfo::FailHTLC { .. } => {
+ HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
// Channel went away before we could fail it. This implies
// the channel is now on chain and our counterparty is
// trying to broadcast the HTLC-Timeout, but that's their
continue;
}
},
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK: the channel is on chain and
+ // sending the HTLC-Timeout is their problem.
+ continue;
+ }
+ },
}
}
} else {
}) => {
let blinded_failure = routing.blinded_failure();
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
- PendingHTLCRouting::Receive { payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret, custom_tlvs } => {
+ PendingHTLCRouting::Receive {
+ payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
+ custom_tlvs, requires_blinded_error: _
+ } => {
let _legacy_hop_data = Some(payment_data.clone());
let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
payment_metadata, custom_tlvs };
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
phantom_shared_secret,
- blinded_failure: None,
+ blinded_failure,
}), payment_hash,
HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
HTLCDestination::FailedPayment { payment_hash: $payment_hash },
},
};
},
- HTLCForwardInfo::FailHTLC { .. } => {
+ HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
}
}
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
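// Per BOLT 4's route-blinding error rules, which the match below implements:
// a hop inside a blinded path (other than the introduction node) must fail
// with `update_fail_malformed_htlc` carrying `invalid_onion_blinding`, while
// the introduction node sends a normal encrypted `update_fail_htlc` with the
// same code, so the payer learns nothing about hops beyond the intro point.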
- let err_packet = match blinded_failure {
+ let failure = match blinded_failure {
Some(BlindedFailure::FromIntroductionNode) => {
let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
- blinded_onion_error.get_encrypted_failure_packet(
+ let err_packet = blinded_onion_error.get_encrypted_failure_packet(
incoming_packet_shared_secret, phantom_shared_secret
- )
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
+ },
+ Some(BlindedFailure::FromBlindedNode) => {
+ HTLCForwardInfo::FailMalformedHTLC {
+ htlc_id: *htlc_id,
+ failure_code: INVALID_ONION_BLINDING,
+ sha256_of_onion: [0; 32]
+ }
},
None => {
- onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret)
+ let err_packet = onion_error.get_encrypted_failure_packet(
+ incoming_packet_shared_secret, phantom_shared_secret
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
}
};
}
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
+ entry.get_mut().push(failure);
},
hash_map::Entry::Vacant(entry) => {
- entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }));
+ entry.insert(vec!(failure));
}
}
mem::drop(forward_htlcs);
Err(e) => PendingHTLCStatus::Fail(e)
};
let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ if msg.blinding_point.is_some() {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ }
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
/// attempted in every channel, or in the specifically provided channel.
///
/// [`ChannelSigner`]: crate::sign::ChannelSigner
- #[cfg(test)] // This is only implemented for one signer method, and should be private until we
- // actually finish implementing it fully.
+ #[cfg(async_signing)]
pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
///
/// # Privacy
///
- /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
- /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
- /// the node must be announced. Otherwise, there is no way to find a path to the introduction
- /// node in order to send the [`InvoiceRequest`].
+ /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
+ /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+ /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+ /// the node must be announced; otherwise, there is no way to find a path to the introduction
+ /// node in order to send the [`InvoiceRequest`].
+ ///
+ /// Also, uses a derived signing pubkey in the offer for recipient privacy.
///
/// # Limitations
///
/// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
/// reply path.
///
+ /// # Errors
+ ///
+ /// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
+ ///
/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
///
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
pub fn create_offer_builder(
&self, description: String
- ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
+ ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
let node_id = self.get_our_node_id();
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
- let path = self.create_one_hop_blinded_path();
- OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
+ let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let builder = OfferBuilder::deriving_signing_pubkey(
+ description, node_id, expanded_key, entropy, secp_ctx
+ )
.chain_hash(self.chain_hash)
- .path(path)
+ .path(path);
+
+ Ok(builder)
}
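// Hypothetical usage (the `channel_manager` binding and `?` error context are
// assumed, not part of this patch):
//
// let offer = channel_manager.create_offer_builder("coffee".to_string())?
//     .amount_msats(10_000)
//     .build()?;
//
// Callers now see path-construction failures as
// `Bolt12SemanticError::MissingPaths` rather than a panic from `unwrap()`.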
/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
///
/// # Privacy
///
- /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
- /// the introduction node and a derived payer id for payer privacy. As such, currently, the
- /// node must be announced. Otherwise, there is no way to find a path to the introduction node
- /// in order to send the [`Bolt12Invoice`].
+ /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
+ /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+ /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+ /// the node must be announced; otherwise, there is no way to find a path to the introduction
+ /// node in order to send the [`Bolt12Invoice`].
+ ///
+ /// Also, uses a derived payer id in the refund for payer privacy.
///
/// # Limitations
///
///
/// # Errors
///
- /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
- /// or if `amount_msats` is invalid.
+ /// Errors if:
+ /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+ /// - `amount_msats` is invalid, or
+ /// - the parameterized [`Router`] is unable to create a blinded path for the refund.
///
/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
///
/// [`Refund`]: crate::offers::refund::Refund
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
+ /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
pub fn create_refund_builder(
&self, description: String, amount_msats: u64, absolute_expiry: Duration,
payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
- let path = self.create_one_hop_blinded_path();
+ let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_payer_id(
description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
)?
///
/// # Errors
///
- /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
- /// or if the provided parameters are invalid for the offer.
+ /// Errors if:
+ /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+ /// - the provided parameters are invalid for the offer, or
+ /// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
+ /// request.
///
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
/// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
None => builder,
Some(payer_note) => builder.payer_note(payer_note),
};
-
let invoice_request = builder.build_and_sign()?;
- let reply_path = self.create_one_hop_blinded_path();
+ let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments
/// node meeting the aforementioned criteria, but there's no guarantee that they will be
/// received and no retries will be made.
///
+ /// # Errors
+ ///
+ /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
+ /// path for the invoice.
+ ///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
let expanded_key = &self.inbound_payment_key;
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
- let payment_paths = vec![
- self.create_one_hop_blinded_payment_path(payment_secret),
- ];
+ let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
+
#[cfg(not(feature = "no-std"))]
let builder = refund.respond_using_derived_keys(
payment_paths, payment_hash, expanded_key, entropy
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
- let reply_path = self.create_one_hop_blinded_path();
+ let reply_path = self.create_blinded_path()
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if refund.paths().is_empty() {
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
- /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
- /// node.
- fn create_one_hop_blinded_path(&self) -> BlindedPath {
+ /// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
+ ///
+ /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+ fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
+ let recipient = self.get_our_node_id();
let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
- BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
+
+ let peers = self.per_peer_state.read().unwrap()
+ .iter()
+ .filter(|(_, peer)| peer.lock().unwrap().latest_features.supports_onion_messages())
+ .map(|(node_id, _)| *node_id)
+ .collect::<Vec<_>>();
+
+ self.router
+ .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+ .and_then(|paths| paths.into_iter().next().ok_or(()))
}
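// A minimal sketch of a `MessageRouter` that satisfies this delegation,
// assuming the trait shape used in this patch; it ignores `peers` and always
// falls back to the one-hop construction this method previously hard-coded.
struct OneHopMessageRouter;
impl MessageRouter for OneHopMessageRouter {
    fn find_path(
        &self, _sender: PublicKey, _peers: Vec<PublicKey>, destination: Destination
    ) -> Result<OnionMessagePath, ()> {
        // Assumes a direct connection to the destination (field names here
        // follow the trait as used elsewhere in this patch).
        Ok(OnionMessagePath { intermediate_nodes: Vec::new(), destination })
    }
    fn create_blinded_paths<
        ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
    >(
        &self, recipient: PublicKey, _peers: Vec<PublicKey>, entropy_source: &ES,
        secp_ctx: &Secp256k1<T>
    ) -> Result<Vec<BlindedPath>, ()> {
        BlindedPath::one_hop_for_message(recipient, entropy_source, secp_ctx)
            .map(|path| vec![path])
    }
}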
- /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
- /// node.
- fn create_one_hop_blinded_payment_path(
- &self, payment_secret: PaymentSecret
- ) -> (BlindedPayInfo, BlindedPath) {
+ /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
+ /// [`Router::create_blinded_payment_paths`].
+ fn create_blinded_payment_paths(
+ &self, amount_msats: u64, payment_secret: PaymentSecret
+ ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
+ let first_hops = self.list_usable_channels();
let payee_node_id = self.get_our_node_id();
- let max_cltv_expiry = self.best_block.read().unwrap().height() + LATENCY_GRACE_PERIOD_BLOCKS;
+ let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
+ + LATENCY_GRACE_PERIOD_BLOCKS;
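// Note: `CLTV_FAR_FAR_AWAY` is the router's cap on a route's aggregate CLTV
// delta, so adding it (plus our grace period) presumably keeps the path's
// payment constraints valid for any route a payer might build.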
let payee_tlvs = ReceiveTlvs {
payment_secret,
payment_constraints: PaymentConstraints {
htlc_minimum_msat: 1,
},
};
- // TODO: Err for overflow?
- BlindedPath::one_hop_for_payment(
- payee_node_id, payee_tlvs, entropy_source, secp_ctx
- ).unwrap()
+ self.router.create_blinded_payment_paths(
+ payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
+ )
}
/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
&invoice_request
) {
- Ok(amount_msats) => Some(amount_msats),
+ Ok(amount_msats) => amount_msats,
Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
};
let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
return Some(OffersMessage::InvoiceError(error.into()));
},
};
- let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
- match self.create_inbound_payment(amount_msats, relative_expiry, None) {
- Ok((payment_hash, payment_secret)) if invoice_request.keys.is_some() => {
- let payment_paths = vec![
- self.create_one_hop_blinded_payment_path(payment_secret),
- ];
- #[cfg(not(feature = "no-std"))]
- let builder = invoice_request.respond_using_derived_keys(
- payment_paths, payment_hash
- );
- #[cfg(feature = "no-std")]
- let created_at = Duration::from_secs(
- self.highest_seen_timestamp.load(Ordering::Acquire) as u64
- );
- #[cfg(feature = "no-std")]
- let builder = invoice_request.respond_using_derived_keys_no_std(
- payment_paths, payment_hash, created_at
- );
- match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
- Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
- Err(error) => Some(OffersMessage::InvoiceError(error.into())),
- }
- },
- Ok((payment_hash, payment_secret)) => {
- let payment_paths = vec![
- self.create_one_hop_blinded_payment_path(payment_secret),
- ];
- #[cfg(not(feature = "no-std"))]
- let builder = invoice_request.respond_with(payment_paths, payment_hash);
- #[cfg(feature = "no-std")]
- let created_at = Duration::from_secs(
- self.highest_seen_timestamp.load(Ordering::Acquire) as u64
- );
- #[cfg(feature = "no-std")]
- let builder = invoice_request.respond_with_no_std(
- payment_paths, payment_hash, created_at
- );
- let response = builder.and_then(|builder| builder.allow_mpp().build())
- .map_err(|e| OffersMessage::InvoiceError(e.into()))
- .and_then(|invoice|
- match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
- Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
- Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed signing invoice".to_string())
- )),
- Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed invoice signature verification".to_string())
- )),
- });
- match response {
- Ok(invoice) => Some(invoice),
- Err(error) => Some(error),
- }
+ let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+ let (payment_hash, payment_secret) = match self.create_inbound_payment(
+ Some(amount_msats), relative_expiry, None
+ ) {
+ Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
+ Err(()) => {
+ let error = Bolt12SemanticError::InvalidAmount;
+ return Some(OffersMessage::InvoiceError(error.into()));
},
+ };
+
+ let payment_paths = match self.create_blinded_payment_paths(
+ amount_msats, payment_secret
+ ) {
+ Ok(payment_paths) => payment_paths,
Err(()) => {
- Some(OffersMessage::InvoiceError(Bolt12SemanticError::InvalidAmount.into()))
+ let error = Bolt12SemanticError::MissingPaths;
+ return Some(OffersMessage::InvoiceError(error.into()));
},
+ };
+
+ #[cfg(feature = "no-std")]
+ let created_at = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
+
+ if invoice_request.keys.is_some() {
+ #[cfg(not(feature = "no-std"))]
+ let builder = invoice_request.respond_using_derived_keys(
+ payment_paths, payment_hash
+ );
+ #[cfg(feature = "no-std")]
+ let builder = invoice_request.respond_using_derived_keys_no_std(
+ payment_paths, payment_hash, created_at
+ );
+ match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
+ Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
+ Err(error) => Some(OffersMessage::InvoiceError(error.into())),
+ }
+ } else {
+ #[cfg(not(feature = "no-std"))]
+ let builder = invoice_request.respond_with(payment_paths, payment_hash);
+ #[cfg(feature = "no-std")]
+ let builder = invoice_request.respond_with_no_std(
+ payment_paths, payment_hash, created_at
+ );
+ let response = builder.and_then(|builder| builder.allow_mpp().build())
+ .map_err(|e| OffersMessage::InvoiceError(e.into()))
+ .and_then(|invoice|
+ match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
+ Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
+ Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
+ InvoiceError::from_string("Failed signing invoice".to_string())
+ )),
+ Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
+ InvoiceError::from_string("Failed invoice signature verification".to_string())
+ )),
+ });
+ match response {
+ Ok(invoice) => Some(invoice),
+ Err(error) => Some(error),
+ }
}
},
OffersMessage::Invoice(invoice) => {
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(5, custom_tlvs, optional_vec),
+ (7, requires_blinded_error, (default_value, false)),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
);
impl_writeable_tlv_based_enum!(BlindedFailure,
- (0, FromIntroductionNode) => {}, ;
+ (0, FromIntroductionNode) => {},
+ (2, FromBlindedNode) => {}, ;
);
impl_writeable_tlv_based!(HTLCPreviousHopData, {
(6, prev_funding_outpoint, required),
});
-impl_writeable_tlv_based_enum!(HTLCForwardInfo,
- (1, FailHTLC) => {
- (0, htlc_id, required),
- (2, err_packet, required),
- };
- (0, AddHTLC)
-);
+impl Writeable for HTLCForwardInfo {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ const FAIL_HTLC_VARIANT_ID: u8 = 1;
+ match self {
+ Self::AddHTLC(info) => {
+ 0u8.write(w)?;
+ info.write(w)?;
+ },
+ Self::FailHTLC { htlc_id, err_packet } => {
+ FAIL_HTLC_VARIANT_ID.write(w)?;
+ write_tlv_fields!(w, {
+ (0, htlc_id, required),
+ (2, err_packet, required),
+ });
+ },
+ Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ // Since this variant was added in 0.0.119, write this as `::FailHTLC` with an empty error
+ // packet so older versions have something to fail back with, but serialize the real data as
+ // optional TLVs for the benefit of newer versions.
+ FAIL_HTLC_VARIANT_ID.write(w)?;
+ let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+ write_tlv_fields!(w, {
+ (0, htlc_id, required),
+ (1, failure_code, required),
+ (2, dummy_err_packet, required),
+ (3, sha256_of_onion, required),
+ });
+ },
+ }
+ Ok(())
+ }
+}
+
+impl Readable for HTLCForwardInfo {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(r)?;
+ Ok(match id {
+ 0 => Self::AddHTLC(Readable::read(r)?),
+ 1 => {
+ _init_and_read_len_prefixed_tlv_fields!(r, {
+ (0, htlc_id, required),
+ (1, malformed_htlc_failure_code, option),
+ (2, err_packet, required),
+ (3, sha256_of_onion, option),
+ });
+ if let Some(failure_code) = malformed_htlc_failure_code {
+ Self::FailMalformedHTLC {
+ htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+ failure_code,
+ sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
+ }
+ } else {
+ Self::FailHTLC {
+ htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
+ err_packet: _init_tlv_based_struct_field!(err_packet, required),
+ }
+ }
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ })
+ }
+}
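// A toy sketch of the downgrade trick above, assuming a simplified TLV
// encoding (1-byte type, 1-byte length; LDK's real streams use BigSize).
// Readers skip unknown *odd*-typed records, so pre-0.0.119 readers see a
// `FailHTLC` with an empty packet, while 0.0.119+ readers rebuild the
// `FailMalformedHTLC` from types 1 and 3.
fn write_fail_htlc_compat(htlc_id: u64, malformed: Option<(u16, [u8; 32])>) -> Vec<u8> {
    let mut out = vec![1u8]; // variant id: FailHTLC
    out.extend_from_slice(&[0, 8]); // type 0 (required): htlc_id
    out.extend_from_slice(&htlc_id.to_be_bytes());
    if let Some((code, _)) = malformed {
        out.extend_from_slice(&[1, 2]); // odd type 1 (ignorable): failure_code
        out.extend_from_slice(&code.to_be_bytes());
    }
    out.extend_from_slice(&[2, 0]); // type 2 (required): err_packet, empty dummy
    if let Some((_, sha)) = malformed {
        out.extend_from_slice(&[3, 32]); // odd type 3 (ignorable): sha256_of_onion
        out.extend_from_slice(&sha);
    }
    out
}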
impl_writeable_tlv_based!(PendingInboundPayment, {
(0, payment_secret, required),
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::ChannelId;
- use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
+ use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{self, ErrorAction};
use crate::ln::msgs::ChannelMessageHandler;
+ use crate::prelude::*;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
+ use crate::util::ser::Writeable;
use crate::util::test_utils;
use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
use crate::sign::EntropySource;
check_spends!(txn[0], funding_tx);
}
}
+
+ #[test]
+ fn test_malformed_forward_htlcs_ser() {
+ // Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
+ let chanmon_cfg = create_chanmon_cfgs(1);
+ let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+ let persister;
+ let chain_monitor;
+ let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
+ let deserialized_chanmgr;
+ let mut nodes = create_network(1, &node_cfg, &chanmgrs);
+
+ let dummy_failed_htlc = |htlc_id| {
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
+ };
+ let dummy_malformed_htlc = |htlc_id| {
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
+ };
+
+ let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+ if htlc_id % 2 == 0 {
+ dummy_failed_htlc(htlc_id)
+ } else {
+ dummy_malformed_htlc(htlc_id)
+ }
+ }).collect();
+
+ let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
+ if htlc_id % 2 == 1 {
+ dummy_failed_htlc(htlc_id)
+ } else {
+ dummy_malformed_htlc(htlc_id)
+ }
+ }).collect();
+
+ let (scid_1, scid_2) = (42, 43);
+ let mut forward_htlcs = HashMap::new();
+ forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
+ forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
+
+ let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+ *chanmgr_fwd_htlcs = forward_htlcs.clone();
+ core::mem::drop(chanmgr_fwd_htlcs);
+
+ reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
+
+ let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
+ for scid in [scid_1, scid_2].iter() {
+ let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
+ assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
+ }
+ assert!(deserialized_fwd_htlcs.is_empty());
+ core::mem::drop(deserialized_fwd_htlcs);
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ }
}
#[cfg(ldk_bench)]
//! (see [BOLT-4](https://github.com/lightning/bolts/blob/master/04-onion-routing.md#basic-multi-part-payments) for more information).
//! - `Wumbo` - requires/supports that a node create large channels. Called `option_support_large_channel` in the spec.
//! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-open_channel-message) for more information).
+//! - `AnchorsZeroFeeHtlcTx` - requires/supports that commitment transactions include anchor outputs
+//! and HTLC transactions are pre-signed with zero fee (see
+//! [BOLT-3](https://github.com/lightning/bolts/blob/master/03-transactions.md) for more
+//! information).
+//! - `RouteBlinding` - requires/supports that a node can relay payments over blinded paths
+//! (see [BOLT-4](https://github.com/lightning/bolts/blob/master/04-onion-routing.md#route-blinding) for more information).
//! - `ShutdownAnySegwit` - requires/supports that future segwit versions are allowed in `shutdown`
//! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md) for more information).
//! - `OnionMessages` - requires/supports forwarding onion messages
//! for more info).
//! - `Keysend` - send funds to a node without an invoice
//! (see the [`Keysend` feature assignment proposal](https://github.com/lightning/bolts/issues/605#issuecomment-606679798) for more information).
-//! - `AnchorsZeroFeeHtlcTx` - requires/supports that commitment transactions include anchor outputs
-//! and HTLC transactions are pre-signed with zero fee (see
-//! [BOLT-3](https://github.com/lightning/bolts/blob/master/03-transactions.md) for more
-//! information).
//!
//! LDK knows about the following features, but does not support them:
//! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
// Byte 2
BasicMPP | Wumbo | AnchorsNonzeroFeeHtlcTx | AnchorsZeroFeeHtlcTx,
// Byte 3
- ShutdownAnySegwit | Taproot,
+ RouteBlinding | ShutdownAnySegwit | Taproot,
// Byte 4
OnionMessages,
// Byte 5
// Byte 2
BasicMPP | Wumbo | AnchorsNonzeroFeeHtlcTx | AnchorsZeroFeeHtlcTx,
// Byte 3
- ShutdownAnySegwit | Taproot,
+ RouteBlinding | ShutdownAnySegwit | Taproot,
// Byte 4
OnionMessages,
// Byte 5
define_feature!(23, AnchorsZeroFeeHtlcTx, [InitContext, NodeContext, ChannelTypeContext],
"Feature flags for `option_anchors_zero_fee_htlc_tx`.", set_anchors_zero_fee_htlc_tx_optional,
set_anchors_zero_fee_htlc_tx_required, supports_anchors_zero_fee_htlc_tx, requires_anchors_zero_fee_htlc_tx);
+ define_feature!(25, RouteBlinding, [InitContext, NodeContext],
+ "Feature flags for `option_route_blinding`.", set_route_blinding_optional,
+ set_route_blinding_required, supports_route_blinding, requires_route_blinding);
define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
"Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
init_features.set_basic_mpp_optional();
init_features.set_wumbo_optional();
init_features.set_anchors_zero_fee_htlc_tx_optional();
+ init_features.set_route_blinding_optional();
init_features.set_shutdown_any_segwit_optional();
init_features.set_onion_messages_optional();
init_features.set_channel_type_optional();
// Check that the flags are as expected:
// - option_data_loss_protect (req)
// - var_onion_optin (req) | static_remote_key (req) | payment_secret(req)
- // - basic_mpp | wumbo | anchors_zero_fee_htlc_tx
- // - opt_shutdown_anysegwit
+ // - basic_mpp | wumbo | option_anchors_zero_fee_htlc_tx
+ // - option_route_blinding | opt_shutdown_anysegwit
// - onion_messages
// - option_channel_type | option_scid_alias
// - option_zeroconf
assert_eq!(node_features.flags[0], 0b00000001);
assert_eq!(node_features.flags[1], 0b01010001);
assert_eq!(node_features.flags[2], 0b10001010);
- assert_eq!(node_features.flags[3], 0b00001000);
+ assert_eq!(node_features.flags[3], 0b00001010);
assert_eq!(node_features.flags[4], 0b10000000);
assert_eq!(node_features.flags[5], 0b10100000);
assert_eq!(node_features.flags[6], 0b00001000);
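// Why flags[3] changes from 0b00001000 to 0b00001010: feature bit N lives in
// flags[N / 8] under mask 1 << (N % 8). `option_route_blinding`'s optional bit
// is 25 (bit 24 being the required variant), so it lands in byte 3, bit 1:
fn feature_bit_position(bit: usize) -> (usize, u8) {
    (bit / 8, 1u8 << (bit % 8))
}
// feature_bit_position(25) == (3, 0b0000_0010) -- option_route_blinding (optional)
// feature_bit_position(27) == (3, 0b0000_1000) -- opt_shutdown_anysegwit (optional)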
)
}
+/// Like `get_route` above, but adds a random CLTV offset to the final hop.
+pub fn find_route(send_node: &Node, route_params: &RouteParameters) -> Result<Route, msgs::LightningError> {
+ let scorer = TestScorer::new();
+ let keys_manager = TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
+ let random_seed_bytes = keys_manager.get_secure_random_bytes();
+ router::find_route(
+ &send_node.node.get_our_node_id(), route_params, &send_node.network_graph,
+ Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
+ send_node.logger, &scorer, &Default::default(), &random_seed_bytes
+ )
+}
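// Hypothetical usage in a functional test (the payment parameters and amount
// are assumed):
//
// let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
// let route = find_route(&nodes[0], &route_params).unwrap();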
+
/// Gets a route from the given sender to the node described in `payment_params`.
///
/// Don't use this, use the identically-named function instead.
nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
+ mine_transaction(&nodes[1], &node_txn[0]);
+ if nodes[1].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[1].tx_broadcaster.txn_broadcast();
+ }
+
mine_transaction(&nodes[0], &node_txn[0]);
check_added_monitors!(nodes[0], 1);
test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
// One pending HTLC is discarded by the force-close:
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
// connect_style.
return;
}
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
route_payment(&nodes[0], &[&nodes[1]], 10000000);
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(node_txn.len(), 3);
- assert_eq!(node_txn[0].txid(), node_txn[1].txid());
+ let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(node_txn.len(), 2);
+ check_spends!(node_txn[0], funding_tx);
+ check_spends!(node_txn[1], node_txn[0]);
- let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
+ let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
connect_block(&nodes[1], &block);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
- let tx = {
+ let commitment_tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// have a use for it unless nodes[2] learns the preimage somehow, the funds will go
node_txn.remove(0)
};
- mine_transaction(&nodes[1], &tx);
+ mine_transaction(&nodes[1], &commitment_tx);
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], true);
get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
}
- mine_transaction(&nodes[2], &tx);
- let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(node_txn.len(), 1);
- assert_eq!(node_txn[0].input.len(), 1);
- assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
- assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
- assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+ mine_transaction(&nodes[2], &commitment_tx);
+ let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
+ assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+ let htlc_tx = node_txn.pop().unwrap();
+ assert_eq!(htlc_tx.input.len(), 1);
+ assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
+ assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
+ assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
- check_spends!(node_txn[0], tx);
+ check_spends!(htlc_tx, commitment_tx);
}
#[test]
watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
// Watchtower Alice should have broadcast a commitment/HTLC-timeout
- let alice_state = {
+ {
let mut txn = alice_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
- txn.remove(0)
+ check_spends!(txn[0], chan_1.3);
+ check_spends!(txn[1], txn[0]);
};
// Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
check_added_monitors(&nodes[0], 1);
{
let htlc_txn = alice_broadcaster.txn_broadcast();
- assert_eq!(htlc_txn.len(), 2);
+ assert_eq!(htlc_txn.len(), 1);
check_spends!(htlc_txn[0], bob_state_y);
- // Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
- // it. However, she should, because it now has an invalid parent.
- check_spends!(htlc_txn[1], alice_state);
}
}
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
} else {
- assert_eq!(bob_txn.len(), 2);
+ if nodes[1].connect_style.borrow().updates_best_block_first() {
+ assert_eq!(bob_txn.len(), 3);
+ assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
+ } else {
+ assert_eq!(bob_txn.len(), 2);
+ }
check_spends!(bob_txn[0], chan_ab.3);
}
}
// If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
// Bob force-closed and broadcasts the commitment transaction along with a
// HTLC-output-claiming transaction.
- let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
if broadcast_alice {
assert_eq!(bob_txn.len(), 1);
check_spends!(bob_txn[0], txn_to_broadcast[0]);
assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
} else {
- assert_eq!(bob_txn.len(), 2);
- check_spends!(bob_txn[1], txn_to_broadcast[0]);
- assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
+ assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
+ let htlc_tx = bob_txn.pop().unwrap();
+ check_spends!(htlc_tx, txn_to_broadcast[0]);
+ assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
}
}
}
// We should broadcast an HTLC transaction spending our funding transaction first
let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(spending_txn.len(), 2);
- assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
- check_spends!(spending_txn[1], node_txn[0]);
+ let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
+ &spending_txn[1]
+ } else {
+ &spending_txn[0]
+ };
+ check_spends!(htlc_tx, node_txn[0]);
// We should also generate a SpendableOutputs event with the to_self output (as its
// timelock is up).
let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
- &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
+ &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);
#[cfg(test)]
#[allow(unused_mut)]
mod shutdown_tests;
-#[cfg(test)]
+#[cfg(all(test, async_signing))]
#[allow(unused_mut)]
mod async_signer_tests;
commitment_tx
};
let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &commitment_tx));
- if anchors && nodes[0].connect_style.borrow().updates_best_block_first() {
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
assert_eq!(txn[0].txid(), commitment_tx.txid());
};
mine_transaction(&nodes[0], &commitment_tx);
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ let txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ assert_eq!(txn[0].txid(), commitment_tx.txid());
+ }
// Connect blocks until the HTLC's expiration is met, expecting a transaction broadcast.
connect_blocks(&nodes[0], TEST_FINAL_CLTV);
nodes[1].node.timer_tick_occurred();
check_added_monitors(&nodes[1], 2);
check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager, [nodes[0].node.get_our_node_id(); 2], 1000000);
- let (revoked_commitment_a, revoked_commitment_b) = {
- let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
- assert_eq!(txn.len(), 2);
- assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
- assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
- if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
- check_spends!(&txn[0], &chan_a.3);
- check_spends!(&txn[1], &chan_b.3);
- (txn[0].clone(), txn[1].clone())
- } else {
- check_spends!(&txn[1], &chan_a.3);
- check_spends!(&txn[0], &chan_b.3);
- (txn[1].clone(), txn[0].clone())
- }
- };
// Bob should now receive two events to bump his revoked commitment transaction fees.
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
+ let mut revoked_commitment_txs = Vec::with_capacity(events.len());
let mut anchor_txs = Vec::with_capacity(events.len());
for (idx, event) in events.into_iter().enumerate() {
let utxo_value = Amount::ONE_BTC.to_sat() * (idx + 1) as u64;
};
let txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 2);
+ assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+ if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+ check_spends!(&txn[0], &chan_a.3);
+ } else {
+ check_spends!(&txn[0], &chan_b.3);
+ }
let (commitment_tx, anchor_tx) = (&txn[0], &txn[1]);
check_spends!(anchor_tx, coinbase_tx, commitment_tx);
+
+ revoked_commitment_txs.push(commitment_tx.clone());
anchor_txs.push(anchor_tx.clone());
};
for node in &nodes {
- mine_transactions(node, &[&revoked_commitment_a, &anchor_txs[0], &revoked_commitment_b, &anchor_txs[1]]);
+ mine_transactions(node, &[&revoked_commitment_txs[0], &anchor_txs[0], &revoked_commitment_txs[1], &anchor_txs[1]]);
}
check_added_monitors!(&nodes[0], 2);
check_closed_broadcast(&nodes[0], 2, true);
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(txn.len(), 4);
- let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
+ let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_txs[0].txid() {
(if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] }, if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] })
} else {
(if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] }, if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] })
assert_eq!(revoked_htlc_claim_a.input.len(), 2); // Spends both HTLC outputs
assert_eq!(revoked_htlc_claim_a.output.len(), 1);
- check_spends!(revoked_htlc_claim_a, revoked_commitment_a);
+ check_spends!(revoked_htlc_claim_a, revoked_commitment_txs[0]);
assert_eq!(revoked_htlc_claim_b.input.len(), 2); // Spends both HTLC outputs
assert_eq!(revoked_htlc_claim_b.output.len(), 1);
- check_spends!(revoked_htlc_claim_b, revoked_commitment_b);
+ check_spends!(revoked_htlc_claim_b, revoked_commitment_txs[1]);
}
// Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
sig
};
htlc_tx.input[0].witness = Witness::from_slice(&[fee_utxo_sig, public_key.to_bytes()]);
- check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
+ check_spends!(htlc_tx, coinbase_tx, revoked_commitment_txs[0], revoked_commitment_txs[1]);
htlc_tx
};
).unwrap();
if let SpendableOutputDescriptor::StaticPaymentOutput(_) = &outputs[0] {
- check_spends!(spend_tx, &revoked_commitment_a, &revoked_commitment_b);
+ check_spends!(spend_tx, &revoked_commitment_txs[0], &revoked_commitment_txs[1]);
} else {
check_spends!(spend_tx, revoked_claim_transactions.get(&spend_tx.input[0].previous_output.txid).unwrap());
}
// If we update the best block to the new height before providing the confirmed transactions,
// we'll see another broadcast of the commitment transaction.
- if anchors && !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
+ if !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
let _ = nodes[0].tx_broadcaster.txn_broadcast();
}
let htlc_timeout_tx = {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
- let tx = if txn[0].input[0].previous_output.txid == commitment_tx.txid() {
- txn[0].clone()
- } else {
- txn[1].clone()
- };
+ let tx = txn.pop().unwrap();
check_spends!(tx, commitment_tx, coinbase_tx);
tx
};
// These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
// them from untrusted input):
#[derive(Clone)]
+ #[cfg_attr(test, derive(Debug, PartialEq))]
pub struct FinalOnionHopData {
pub payment_secret: PaymentSecret,
/// The total value, in msat, of the payment as received by the ultimate recipient.
outgoing_cltv_value: u32,
payment_secret: PaymentSecret,
payment_constraints: PaymentConstraints,
- intro_node_blinding_point: PublicKey,
+ intro_node_blinding_point: Option<PublicKey>,
}
}
}
}
-impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: NodeSigner {
- fn read<R: Read>(r: &mut R, node_signer: &NS) -> Result<Self, DecodeError> {
+impl<NS: Deref> ReadableArgs<(Option<PublicKey>, &NS)> for InboundOnionPayload where NS::Target: NodeSigner {
+ fn read<R: Read>(r: &mut R, args: (Option<PublicKey>, &NS)) -> Result<Self, DecodeError> {
+ let (update_add_blinding_point, node_signer) = args;
+
let mut amt = None;
let mut cltv_value = None;
let mut short_id: Option<u64> = None;
});
if amt.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
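+ // The blinding point may come either from the onion payload itself (if we are the
+ // intro node in a blinded path) or from the `update_add_htlc` message (otherwise),
+ // but never from both at once.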
+ if intro_node_blinding_point.is_some() && update_add_blinding_point.is_some() {
+ return Err(DecodeError::InvalidValue)
+ }
- if let Some(blinding_point) = intro_node_blinding_point {
- if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() {
+ if let Some(blinding_point) = intro_node_blinding_point.or(update_add_blinding_point) {
+ if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() ||
+ keysend_preimage.is_some()
+ {
return Err(DecodeError::InvalidValue)
}
let enc_tlvs = encrypted_tlvs_opt.ok_or(DecodeError::InvalidValue)?.0;
payment_relay,
payment_constraints,
features,
- intro_node_blinding_point: blinding_point,
+ intro_node_blinding_point: intro_node_blinding_point.ok_or(DecodeError::InvalidValue)?,
})
},
ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(ReceiveTlvs {
outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
payment_secret,
payment_constraints,
- intro_node_blinding_point: blinding_point,
+ intro_node_blinding_point,
})
},
}
assert_eq!(encoded_value, target_value);
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
- let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
if let msgs::InboundOnionPayload::Forward {
short_channel_id, amt_to_forward, outgoing_cltv_value
} = inbound_msg {
assert_eq!(encoded_value, target_value);
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
- let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
if let msgs::InboundOnionPayload::Receive {
payment_data: None, amt_msat, outgoing_cltv_value, ..
} = inbound_msg {
assert_eq!(encoded_value, target_value);
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
- let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
if let msgs::InboundOnionPayload::Receive {
payment_data: Some(FinalOnionHopData {
payment_secret,
};
let encoded_value = msg.encode();
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
- assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).is_err());
+ assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..]), (None, &&node_signer)).is_err());
let good_type_range_tlvs = vec![
((1 << 16) - 3, vec![42]),
((1 << 16) - 1, vec![42; 32]),
*custom_tlvs = good_type_range_tlvs.clone();
}
let encoded_value = msg.encode();
- let inbound_msg = ReadableArgs::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).unwrap();
+ let inbound_msg = ReadableArgs::read(&mut Cursor::new(&encoded_value[..]), (None, &&node_signer)).unwrap();
match inbound_msg {
msgs::InboundOnionPayload::Receive { custom_tlvs, .. } => assert!(custom_tlvs.is_empty()),
_ => panic!(),
let target_value = <Vec<u8>>::from_hex("2e02080badf00d010203040404ffffffffff0000000146c6616b021234ff0000000146c6616f084242424242424242").unwrap();
assert_eq!(encoded_value, target_value);
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
- let inbound_msg: msgs::InboundOnionPayload = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+ let inbound_msg: msgs::InboundOnionPayload = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
if let msgs::InboundOnionPayload::Receive {
payment_data: None,
payment_metadata: None,
let mut rd = Cursor::new(&big_payload[..]);
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
- <msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestKeysInterface>>
- ::read(&mut rd, &&node_signer).unwrap();
+ <msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestKeysInterface)>>
+ ::read(&mut rd, (None, &&node_signer)).unwrap();
}
// see above test, needs to be a separate method for use of the serialization macros.
fn encode_big_payload() -> Result<Vec<u8>, io::Error> {
//! Primarily features [`peel_payment_onion`], which allows decoding an onion statelessly
//! and can be used to predict whether we'd accept a payment.
-use bitcoin::hashes::Hash;
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::{self, Secp256k1, PublicKey};
+use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1};
use crate::blinded_path;
use crate::blinded_path::payment::{PaymentConstraints, PaymentRelay};
pub msg: &'static str,
}
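+/// Checks that `amt_msat` and `cltv_expiry` satisfy the recipient-provided
+/// [`PaymentConstraints`] carried in a blinded payment's TLVs.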
+fn check_blinded_payment_constraints(
+ amt_msat: u64, cltv_expiry: u32, constraints: &PaymentConstraints
+) -> Result<(), ()> {
+ if amt_msat < constraints.htlc_minimum_msat ||
+ cltv_expiry > constraints.max_cltv_expiry
+ { return Err(()) }
+ Ok(())
+}
+
fn check_blinded_forward(
inbound_amt_msat: u64, inbound_cltv_expiry: u32, payment_relay: &PaymentRelay,
payment_constraints: &PaymentConstraints, features: &BlindedHopFeatures
let outgoing_cltv_value = inbound_cltv_expiry.checked_sub(
payment_relay.cltv_expiry_delta as u32
).ok_or(())?;
- if inbound_amt_msat < payment_constraints.htlc_minimum_msat ||
- outgoing_cltv_value > payment_constraints.max_cltv_expiry
- { return Err(()) }
+ check_blinded_payment_constraints(inbound_amt_msat, outgoing_cltv_value, payment_constraints)?;
+
if features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()) }
Ok((amt_to_forward, outgoing_cltv_value))
}
amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
counterparty_skimmed_fee_msat: Option<u64>, current_height: u32, accept_mpp_keysend: bool,
) -> Result<PendingHTLCInfo, InboundOnionErr> {
- let (payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value, payment_metadata) = match hop_data {
+ let (
+ payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value,
+ payment_metadata, requires_blinded_error
+ ) = match hop_data {
msgs::InboundOnionPayload::Receive {
payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
} =>
- (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
+ (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata,
+ false),
msgs::InboundOnionPayload::BlindedReceive {
- amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
+ amt_msat, total_msat, outgoing_cltv_value, payment_secret, intro_node_blinding_point,
+ payment_constraints, ..
} => {
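+ // Enforce the recipient-provided constraints up front; violations are failed back
+ // as `invalid_onion_blinding` with zeroed error data.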
+ check_blinded_payment_constraints(amt_msat, cltv_expiry, &payment_constraints)
+ .map_err(|()| {
+ InboundOnionErr {
+ err_code: INVALID_ONION_BLINDING,
+ err_data: vec![0; 32],
+ msg: "Amount or cltv_expiry violated blinded payment constraints",
+ }
+ })?;
let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
- (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
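+ // `requires_blinded_error` (the final tuple element) is set when we are a non-intro
+ // hop, i.e. the blinding point arrived in `update_add_htlc` rather than the onion.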
+ (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None,
+ intro_node_blinding_point.is_none())
}
msgs::InboundOnionPayload::Forward { .. } => {
return Err(InboundOnionErr {
incoming_cltv_expiry: outgoing_cltv_value,
phantom_shared_secret,
custom_tlvs,
+ requires_blinded_error,
}
} else {
return Err(InboundOnionErr {
($msg: expr, $err_code: expr) => {
{
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ let (sha256_of_onion, failure_code) = if msg.blinding_point.is_some() {
+ ([0; 32], INVALID_ONION_BLINDING)
+ } else {
+ (Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(), $err_code)
+ };
return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
- sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(),
- failure_code: $err_code,
+ sha256_of_onion,
+ failure_code,
}));
}
}
return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
}
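+ // Within a blinded path, the onion is encrypted to our *blinded* node id: our node id
+ // multiplied by HMAC-SHA256(key = "blinded_node_id", msg = ECDH(our key, blinding point)).
+ // Compute that scalar so the onion ECDH below is done against the blinded key.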
+ let blinded_node_id_tweak = msg.blinding_point.map(|bp| {
+ let blinded_tlvs_ss = node_signer.ecdh(Recipient::Node, &bp, None).unwrap().secret_bytes();
+ let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
+ hmac.input(blinded_tlvs_ss.as_ref());
+ Scalar::from_be_bytes(Hmac::from_engine(hmac).to_byte_array()).unwrap()
+ });
let shared_secret = node_signer.ecdh(
- Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None
+ Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), blinded_node_id_tweak.as_ref()
).unwrap().secret_bytes();
if msg.onion_routing_packet.version != 0 {
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
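+ // As with malformed errors above, any failure within a blinded path must be mapped
+ // to a malformed-HTLC error carrying `INVALID_ONION_BLINDING`.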
+ if msg.blinding_point.is_some() {
+ return_malformed_err!($msg, INVALID_ONION_BLINDING)
+ }
+
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
let next_hop = match onion_utils::decode_next_payment_hop(
shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
- msg.payment_hash, node_signer
+ msg.payment_hash, msg.blinding_point, node_signer
) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
pub(crate) fn decode_next_payment_hop<NS: Deref>(
shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash,
- node_signer: &NS,
+ blinding_point: Option<PublicKey>, node_signer: &NS,
) -> Result<Hop, OnionDecodeErr> where NS::Target: NodeSigner {
- match decode_next_hop(shared_secret, hop_data, hmac_bytes, Some(payment_hash), node_signer) {
+ match decode_next_hop(
+ shared_secret, hop_data, hmac_bytes, Some(payment_hash), (blinding_point, node_signer)
+ ) {
Ok((next_hop_data, None)) => Ok(Hop::Receive(next_hop_data)),
Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => {
Ok(Hop::Forward {
let nodes_0_deserialized;
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
// Serialize the ChannelManager prior to sending payments
assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
mine_transaction(&nodes[1], &as_commitment_tx);
- let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(bs_htlc_claim_txn.len(), 1);
- check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
+ let bs_htlc_claim_txn = {
+ let mut txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(txn.len(), 2);
+ check_spends!(txn[0], funding_tx);
+ check_spends!(txn[1], as_commitment_tx);
+ txn.pop().unwrap()
+ };
if !confirm_before_reload {
mine_transaction(&nodes[0], &as_commitment_tx);
+ let txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ assert_eq!(txn[0].txid(), as_commitment_tx.txid());
}
- mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
+ mine_transaction(&nodes[0], &bs_htlc_claim_txn);
expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false);
connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
};
check_spends!(first_htlc_timeout_tx, as_commitment_tx);
check_spends!(second_htlc_timeout_tx, as_commitment_tx);
- if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
+ if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output {
confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
} else {
confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
// after the commitment transaction, so always connect the commitment transaction.
mine_transaction(&nodes[0], &bs_commitment_tx[0]);
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[0].tx_broadcaster.txn_broadcast();
+ }
mine_transaction(&nodes[1], &bs_commitment_tx[0]);
if !use_dust {
connect_blocks(&nodes[0], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
connect_blocks(&nodes[1], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
assert_eq!(as_htlc_timeout.len(), 1);
+ check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
mine_transaction(&nodes[0], &as_htlc_timeout[0]);
- // nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
- nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
mine_transaction(&nodes[1], &as_htlc_timeout[0]);
}
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[0].tx_broadcaster.txn_broadcast();
+ }
// Create a new channel on which to retry the payment before we fail the payment via the
// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(node_txn.len(), 3);
- assert_eq!(node_txn[0].txid(), node_txn[1].txid());
- check_spends!(node_txn[1], funding_tx);
- check_spends!(node_txn[2], node_txn[1]);
- let timeout_txn = vec![node_txn[2].clone()];
+ let (commitment_tx, htlc_timeout_tx) = {
+ let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(txn.len(), 2);
+ check_spends!(txn[0], funding_tx);
+ check_spends!(txn[1], txn[0]);
+ (txn.remove(0), txn.remove(0))
+ };
nodes[1].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
- connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[1].clone()]));
+ mine_transaction(&nodes[1], &commitment_tx);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
- let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(claim_txn.len(), 1);
- check_spends!(claim_txn[0], node_txn[1]);
+ let htlc_success_tx = {
+ let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], commitment_tx);
+ txn.pop().unwrap()
+ };
- connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[1].clone()]));
+ mine_transaction(&nodes[0], &commitment_tx);
if confirm_commitment_tx {
connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
}
- let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] });
+ let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] });
if payment_timeout {
assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
} else {
connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
- let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(bs_htlc_timeout_tx.len(), 1);
- confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+ let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+ let bs_htlc_timeout_tx = txn.pop().unwrap();
+ confirm_transaction(&nodes[1], &bs_htlc_timeout_tx);
}
} else {
confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
mine_transaction(&nodes[0], &commitment_tx_b);
mine_transaction(&nodes[1], &commitment_tx_b);
+ if nodes[1].connect_style.borrow().updates_best_block_first() {
+ let _ = nodes[1].tx_broadcaster.txn_broadcast();
+ }
// Provide the preimage now, such that we only claim from the holder commitment (since it's
// currently confirmed) and not the counterparty's.
// commitment (still unrevoked) is the currently confirmed closing transaction.
assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
}
+
+fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_counterparty_commitment: bool) {
+ // Tests that a node will retry broadcasting its own commitment transaction after a confirmed
+ // counterparty commitment has been reorged out of the chain.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ if revoked_counterparty_commitment {
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+ }
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut config = test_default_channel_config();
+ if anchors {
+ config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+ config.manually_accept_inbound_channels = true;
+ }
+ let persister;
+ let new_chain_monitor;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+ let nodes_1_deserialized;
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Route a payment so we have an HTLC to claim as well.
+ let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ if revoked_counterparty_commitment {
+ // Trigger a fee update such that we advance the state. We will have B broadcast its state
+ // without the fee update.
+ let serialized_node = nodes[1].node.encode();
+ let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
+
+ *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() += 1;
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+
+ let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
+ commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);
+
+ reload_node!(
+ nodes[1], config, &serialized_node, &[&serialized_monitor], persister, new_chain_monitor, nodes_1_deserialized
+ );
+ }
+
+ // Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A.
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+ check_closed_broadcast(&nodes[0], 1, true);
+ check_added_monitors(&nodes[0], 1);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+
+ {
+ let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+ if anchors {
+ assert_eq!(txn.len(), 1);
+ let commitment_tx_a = txn.pop().unwrap();
+ check_spends!(commitment_tx_a, funding_tx);
+ } else {
+ assert_eq!(txn.len(), 2);
+ let htlc_tx_a = txn.pop().unwrap();
+ let commitment_tx_a = txn.pop().unwrap();
+ check_spends!(commitment_tx_a, funding_tx);
+ check_spends!(htlc_tx_a, commitment_tx_a);
+ }
+ };
+
+ // B will also broadcast its own commitment.
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_added_monitors(&nodes[1], 1);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
+
+ let commitment_b = {
+ let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ let tx = txn.pop().unwrap();
+ check_spends!(tx, funding_tx);
+ tx
+ };
+
+ // Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B.
+ mine_transaction(&nodes[0], &commitment_b);
+ {
+ let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+ if nodes[0].connect_style.borrow().updates_best_block_first() {
+ // A's commitment and HTLC-timeout transactions are rebroadcast because the best
+ // block was updated prior to seeing `commitment_b`.
+ assert_eq!(txn.len(), if anchors { 2 } else { 3 });
+ check_spends!(txn.last().unwrap(), commitment_b);
+ } else {
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], commitment_b);
+ }
+ }
+
+ // Disconnect the block, allowing A to retry its own commitment. Note that we connect two
+ // blocks, one to get us back to the original height, and another to retry our pending claims.
+ disconnect_blocks(&nodes[0], 1);
+ connect_blocks(&nodes[0], 2);
+ {
+ let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+ if anchors {
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ } else {
+ assert_eq!(txn.len(), 2);
+ check_spends!(txn[0], txn[1]); // HTLC timeout A
+ check_spends!(txn[1], funding_tx); // Commitment A
+ assert_ne!(txn[1].txid(), commitment_b.txid());
+ }
+ }
+}
+
+#[test]
+fn test_retries_own_commitment_broadcast_after_reorg() {
+ do_test_retries_own_commitment_broadcast_after_reorg(false, false);
+ do_test_retries_own_commitment_broadcast_after_reorg(false, true);
+ do_test_retries_own_commitment_broadcast_after_reorg(true, false);
+ do_test_retries_own_commitment_broadcast_after_reorg(true, true);
+}
use crate::events::{Event, EventsProvider};
use crate::ln::features::InitFeatures;
use crate::ln::msgs::{self, DecodeError, OnionMessageHandler, SocketAddress};
-use crate::sign::{NodeSigner, Recipient};
+use crate::sign::{EntropySource, NodeSigner, Recipient};
use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
use crate::util::test_utils;
use super::{CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessageContents, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::FromHex;
-use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey, self};
use crate::io;
use crate::io_extras::read_to_end;
Some(vec![SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }]),
})
}
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+ _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ unreachable!()
+ }
}
struct TestOffersMessageHandler {}
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::hashes::hex::FromHex;
-/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey, self};
/// # use lightning::blinded_path::BlindedPath;
-/// # use lightning::sign::KeysManager;
+/// # use lightning::sign::{EntropySource, KeysManager};
/// # use lightning::ln::peer_handler::IgnoringMessageHandler;
/// # use lightning::onion_message::{OnionMessageContents, Destination, MessageRouter, OnionMessagePath, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # first_node_addresses: None,
/// # })
/// # }
+/// # fn create_blinded_paths<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
+/// # &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+/// # ) -> Result<Vec<BlindedPath>, ()> {
+/// # unreachable!()
+/// # }
/// # }
/// # let seed = [42u8; 32];
/// # let time = Duration::from_secs(123456);
fn find_path(
&self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
) -> Result<OnionMessagePath, ()>;
+
+ /// Creates [`BlindedPath`]s to the `recipient` node. The nodes in `peers` are assumed to be
+ /// direct peers with the `recipient`.
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+ secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()>;
}
/// A [`MessageRouter`] that can only route to a directly connected [`Destination`].
}
}
}
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+ secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ // Limit the number of blinded paths that are computed.
+ const MAX_PATHS: usize = 3;
+
+ // Ensure peers have at least three channels so that it is more difficult to infer the
+ // recipient's node_id.
+ const MIN_PEER_CHANNELS: usize = 3;
+
+ let network_graph = self.network_graph.deref().read_only();
+ let paths = peers.iter()
+ // Limit to peers with announced channels
+ .filter(|pubkey|
+ network_graph
+ .node(&NodeId::from_pubkey(pubkey))
+ .map(|info| &info.channels[..])
+ .map(|channels| channels.len() >= MIN_PEER_CHANNELS)
+ .unwrap_or(false)
+ )
+ .map(|pubkey| vec![*pubkey, recipient])
+ .map(|node_pks| BlindedPath::new_for_message(&node_pks, entropy_source, secp_ctx))
+ .take(MAX_PATHS)
+ .collect::<Result<Vec<_>, _>>();
+
+ match paths {
+ Ok(paths) if !paths.is_empty() => Ok(paths),
+ _ => {
+ if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) {
+ BlindedPath::one_hop_for_message(recipient, entropy_source, secp_ctx)
+ .map(|path| vec![path])
+ } else {
+ Err(())
+ }
+ },
+ }
+ }
}
/// A path for sending an [`OnionMessage`].
fn peer_disconnected(&self, their_node_id: &PublicKey) {
match self.message_recipients.lock().unwrap().remove(their_node_id) {
Some(OnionMessageRecipient::ConnectedPeer(..)) => {},
- _ => debug_assert!(false),
+ Some(_) => debug_assert!(false),
+ None => {},
}
}
//! The router finds paths within a [`NetworkGraph`] for a payment.
-use bitcoin::secp256k1::PublicKey;
+use bitcoin::secp256k1::{PublicKey, Secp256k1, self};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use crate::blinded_path::{BlindedHop, BlindedPath};
+use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
use crate::ln::PaymentHash;
use crate::ln::channelmanager::{ChannelDetails, PaymentId};
-use crate::ln::features::{Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelFeatures, NodeFeatures};
+use crate::ln::features::{BlindedHopFeatures, Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelFeatures, NodeFeatures};
use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
+use crate::onion_message::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId, RoutingFees};
use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp};
+use crate::sign::EntropySource;
use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer};
use crate::util::logger::{Level, Logger};
use crate::util::chacha20::ChaCha20;
use core::ops::Deref;
/// A [`Router`] implemented using [`find_route`].
-pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
+pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
{
logger: L,
random_seed_bytes: Mutex<[u8; 32]>,
scorer: S,
- score_params: SP
+ score_params: SP,
+ message_router: DefaultMessageRouter<G, L>,
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
{
/// Creates a new router.
pub fn new(network_graph: G, logger: L, random_seed_bytes: [u8; 32], scorer: S, score_params: SP) -> Self {
let random_seed_bytes = Mutex::new(random_seed_bytes);
- Self { network_graph, logger, random_seed_bytes, scorer, score_params }
+ let message_router = DefaultMessageRouter::new(network_graph.clone());
+ Self { network_graph, logger, random_seed_bytes, scorer, score_params, message_router }
}
}
-impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
{
&random_seed_bytes
)
}
+
+ fn create_blinded_payment_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
+ amount_msats: u64, entropy_source: &ES, secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+ // Limit the number of blinded paths that are computed.
+ const MAX_PAYMENT_PATHS: usize = 3;
+
+ // Ensure peers have at least three channels so that it is more difficult to infer the
+ // recipient's node_id.
+ const MIN_PEER_CHANNELS: usize = 3;
+
+ let network_graph = self.network_graph.deref().read_only();
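+ // Consider only first hops whose counterparty supports route blinding and which can
+ // plausibly carry the payment amount.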
+ let paths = first_hops.into_iter()
+ .filter(|details| details.counterparty.features.supports_route_blinding())
+ .filter(|details| amount_msats <= details.inbound_capacity_msat)
+ .filter(|details| amount_msats >= details.inbound_htlc_minimum_msat.unwrap_or(0))
+ .filter(|details| amount_msats <= details.inbound_htlc_maximum_msat.unwrap_or(u64::MAX))
+ .filter(|details| network_graph
+ .node(&NodeId::from_pubkey(&details.counterparty.node_id))
+ .map(|node_info| node_info.channels.len() >= MIN_PEER_CHANNELS)
+ .unwrap_or(false)
+ )
+ .filter_map(|details| {
+ let short_channel_id = match details.get_inbound_payment_scid() {
+ Some(short_channel_id) => short_channel_id,
+ None => return None,
+ };
+ let payment_relay: PaymentRelay = match details.counterparty.forwarding_info {
+ Some(forwarding_info) => forwarding_info.into(),
+ None => return None,
+ };
+
+ // Avoid exposing esoteric CLTV expiry deltas
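+ // by rounding up to the next value in a small set of common deltas, dropping any hop
+ // with an unusually large delta.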
+ let cltv_expiry_delta = match payment_relay.cltv_expiry_delta {
+ 0..=40 => 40u32,
+ 41..=80 => 80u32,
+ 81..=144 => 144u32,
+ 145..=216 => 216u32,
+ _ => return None,
+ };
+
+ let payment_constraints = PaymentConstraints {
+ max_cltv_expiry: tlvs.payment_constraints.max_cltv_expiry + cltv_expiry_delta,
+ htlc_minimum_msat: details.inbound_htlc_minimum_msat.unwrap_or(0),
+ };
+ Some(ForwardNode {
+ tlvs: ForwardTlvs {
+ short_channel_id,
+ payment_relay,
+ payment_constraints,
+ features: BlindedHopFeatures::empty(),
+ },
+ node_id: details.counterparty.node_id,
+ htlc_maximum_msat: details.inbound_htlc_maximum_msat.unwrap_or(u64::MAX),
+ })
+ })
+ .map(|forward_node| {
+ BlindedPath::new_for_payment(
+ &[forward_node], recipient, tlvs.clone(), u64::MAX, entropy_source, secp_ctx
+ )
+ })
+ .take(MAX_PAYMENT_PATHS)
+ .collect::<Result<Vec<_>, _>>();
+
+ match paths {
+ Ok(paths) if !paths.is_empty() => Ok(paths),
+ _ => {
+ if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) {
+ BlindedPath::one_hop_for_payment(recipient, tlvs, entropy_source, secp_ctx)
+ .map(|path| vec![path])
+ } else {
+ Err(())
+ }
+ },
+ }
+ }
+}
+
+impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> MessageRouter for DefaultRouter<G, L, S, SP, Sc> where
+ L::Target: Logger,
+ S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
+{
+ fn find_path(
+ &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
+ ) -> Result<OnionMessagePath, ()> {
+ self.message_router.find_path(sender, peers, destination)
+ }
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+ secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ self.message_router.create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+ }
}
/// A trait defining behavior for routing a payment.
-pub trait Router {
+pub trait Router: MessageRouter {
/// Finds a [`Route`] for a payment between the given `payer` and a payee.
///
/// The `payee` and the payment's value are given in [`RouteParameters::payment_params`]
) -> Result<Route, LightningError> {
self.find_route(payer, route_params, first_hops, inflight_htlcs)
}
+
+ /// Creates [`BlindedPath`]s for payment to the `recipient` node. The channels in `first_hops`
+ /// are assumed to be with the `recipient`'s peers. The payment secret and any constraints are
+ /// given in `tlvs`.
+ fn create_blinded_payment_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
+ amount_msats: u64, entropy_source: &ES, secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()>;
}
/// [`ScoreLookUp`] implementation that factors in in-flight HTLC liquidity.
pub(crate) mod bench_utils {
use super::*;
use std::fs::File;
+ use std::time::Duration;
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
if let Ok(route) = route_res {
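+ // The bench never advances time, so `Duration::ZERO` stands in for the current time
+ // in all score updates.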
for path in route.paths {
if seed & 0x80 == 0 {
- scorer.payment_path_successful(&path);
+ scorer.payment_path_successful(&path, Duration::ZERO);
} else {
let short_channel_id = path.hops[path.hops.len() / 2].short_channel_id;
- scorer.payment_path_failed(&path, short_channel_id);
+ scorer.payment_path_failed(&path, short_channel_id, Duration::ZERO);
}
seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
}
use crate::routing::router::{Path, CandidateRouteHop};
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::Logger;
-use crate::util::time::Time;
use crate::prelude::*;
use core::{cmp, fmt};
/// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
pub trait ScoreUpdate {
/// Handles updating channel penalties after failing to route through a channel.
- fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);
+ fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);
/// Handles updating channel penalties after successfully routing along a path.
- fn payment_path_successful(&mut self, path: &Path);
+ fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration);
/// Handles updating channel penalties after a probe over the given path failed.
- fn probe_failed(&mut self, path: &Path, short_channel_id: u64);
+ fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);
/// Handles updating channel penalties after a probe over the given path succeeded.
- fn probe_successful(&mut self, path: &Path);
+ fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration);
+
+ /// Scorers may wish to reduce their certainty of channel liquidity information over time.
+ /// Thus, this method is provided to allow scorers to observe the passage of time - the holder
+ /// of this object should call this method regularly (generally via the
+ /// `lightning-background-processor` crate).
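+ ///
+ /// A minimal sketch of such a call, assuming `std` is available and `scorer` is some value
+ /// implementing this trait:
+ ///
+ /// ```ignore
+ /// use std::time::{SystemTime, UNIX_EPOCH};
+ /// let now = SystemTime::now().duration_since(UNIX_EPOCH).expect("time went backwards");
+ /// scorer.time_passed(now);
+ /// ```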
+ fn time_passed(&mut self, duration_since_epoch: Duration);
}
/// A trait which can both lookup and update routing channel penalty scores.
#[cfg(not(c_bindings))]
impl<S: ScoreUpdate, T: DerefMut<Target=S>> ScoreUpdate for T {
- fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
- self.deref_mut().payment_path_failed(path, short_channel_id)
+ fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+ self.deref_mut().payment_path_failed(path, short_channel_id, duration_since_epoch)
+ }
+
+ fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+ self.deref_mut().payment_path_successful(path, duration_since_epoch)
}
- fn payment_path_successful(&mut self, path: &Path) {
- self.deref_mut().payment_path_successful(path)
+ fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+ self.deref_mut().probe_failed(path, short_channel_id, duration_since_epoch)
}
- fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
- self.deref_mut().probe_failed(path, short_channel_id)
+ fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+ self.deref_mut().probe_successful(path, duration_since_epoch)
}
- fn probe_successful(&mut self, path: &Path) {
- self.deref_mut().probe_successful(path)
+ fn time_passed(&mut self, duration_since_epoch: Duration) {
+ self.deref_mut().time_passed(duration_since_epoch)
}
}
} }
#[cfg(c_bindings)]
impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
- fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
- self.0.payment_path_failed(path, short_channel_id)
+ fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+ self.0.payment_path_failed(path, short_channel_id, duration_since_epoch)
+ }
+
+ fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+ self.0.payment_path_successful(path, duration_since_epoch)
}
- fn payment_path_successful(&mut self, path: &Path) {
- self.0.payment_path_successful(path)
+ fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+ self.0.probe_failed(path, short_channel_id, duration_since_epoch)
}
- fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
- self.0.probe_failed(path, short_channel_id)
+ fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+ self.0.probe_successful(path, duration_since_epoch)
}
- fn probe_successful(&mut self, path: &Path) {
- self.0.probe_successful(path)
+ fn time_passed(&mut self, duration_since_epoch: Duration) {
+ self.0.time_passed(duration_since_epoch)
}
}
}
impl ScoreUpdate for FixedPenaltyScorer {
- fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
+ fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
- fn payment_path_successful(&mut self, _path: &Path) {}
+ fn payment_path_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
- fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
+ fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
- fn probe_successful(&mut self, _path: &Path) {}
+ fn probe_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
+
+ fn time_passed(&mut self, _duration_since_epoch: Duration) {}
}
impl Writeable for FixedPenaltyScorer {
}
}
-#[cfg(not(feature = "no-std"))]
-type ConfiguredTime = crate::util::time::MonotonicTime;
-#[cfg(feature = "no-std")]
-use crate::util::time::Eternity;
-#[cfg(feature = "no-std")]
-type ConfiguredTime = Eternity;
-
/// [`ScoreLookUp`] implementation using channel success probability distributions.
///
/// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
/// formula, but using the history of a channel rather than our latest estimates for the liquidity
/// bounds.
///
-/// # Note
-///
-/// Mixing the `no-std` feature between serialization and deserialization results in undefined
-/// behavior.
-///
/// [1]: https://arxiv.org/abs/2107.05322
/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
-pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
-
-/// Probabilistic [`ScoreLookUp`] implementation.
-///
-/// This is not exported to bindings users generally all users should use the [`ProbabilisticScorer`] type alias.
-pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
+pub struct ProbabilisticScorer<G: Deref<Target = NetworkGraph<L>>, L: Deref>
where L::Target: Logger {
decay_params: ProbabilisticScoringDecayParameters,
network_graph: G,
logger: L,
- // TODO: Remove entries of closed channels.
- channel_liquidities: HashMap<u64, ChannelLiquidity<T>>,
+ channel_liquidities: HashMap<u64, ChannelLiquidity>,
}
/// Parameters for configuring [`ProbabilisticScorer`].
///
/// Default value: 14 days
///
- /// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorerUsingTime::historical_estimated_channel_liquidity_probabilities
+ /// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorer::historical_estimated_channel_liquidity_probabilities
pub historical_no_updates_half_life: Duration,
/// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
/// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
/// first node in the ordering of the channel's counterparties. Thus, swapping the two liquidity
/// offset fields gives the opposite direction.
-struct ChannelLiquidity<T: Time> {
+struct ChannelLiquidity {
/// Lower channel liquidity bound in terms of an offset from zero.
min_liquidity_offset_msat: u64,
/// Upper channel liquidity bound in terms of an offset from the effective capacity.
max_liquidity_offset_msat: u64,
- /// Time when the liquidity bounds were last modified.
- last_updated: T,
-
min_liquidity_offset_history: HistoricalBucketRangeTracker,
max_liquidity_offset_history: HistoricalBucketRangeTracker,
+
+ /// Time when either liquidity bound was last modified as an offset since the unix epoch.
+ last_updated: Duration,
+
+ /// Time when the historical liquidity bounds were last modified as an offset against the unix
+ /// epoch.
+ offset_history_last_updated: Duration,
}
-/// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity and
-/// decayed with a given half life.
-struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
+/// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity.
+struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Deref<Target = Duration>> {
min_liquidity_offset_msat: L,
max_liquidity_offset_msat: L,
liquidity_history: HistoricalMinMaxBuckets<BRT>,
capacity_msat: u64,
- last_updated: U,
- now: T,
- decay_params: ProbabilisticScoringDecayParameters,
+ last_updated: T,
+ offset_history_last_updated: T,
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ProbabilisticScorer<G, L> where L::Target: Logger {
/// Creates a new scorer using the given scoring parameters for sending payments from a node
/// through a network graph.
pub fn new(decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L) -> Self {
}
#[cfg(test)]
- fn with_channel(mut self, short_channel_id: u64, liquidity: ChannelLiquidity<T>) -> Self {
+ fn with_channel(mut self, short_channel_id: u64, liquidity: ChannelLiquidity) -> Self {
assert!(self.channel_liquidities.insert(short_channel_id, liquidity).is_none());
self
}
/// Note that this writes roughly one line per channel for which we have a liquidity estimate,
/// which may be a substantial amount of log output.
pub fn debug_log_liquidity_stats(&self) {
- let now = T::now();
-
let graph = self.network_graph.read_only();
for (scid, liq) in self.channel_liquidities.iter() {
if let Some(chan_debug) = graph.channels().get(scid) {
let log_direction = |source, target| {
if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, self.decay_params);
+ let dir_liq = liq.as_directed(source, target, amt);
- let (min_buckets, max_buckets) = dir_liq.liquidity_history
- .get_decayed_buckets(now, *dir_liq.last_updated,
- self.decay_params.historical_no_updates_half_life)
- .unwrap_or(([0; 32], [0; 32]));
+ let min_buckets = &dir_liq.liquidity_history.min_liquidity_offset_history.buckets;
+ let max_buckets = &dir_liq.liquidity_history.max_liquidity_offset_history.buckets;
log_debug!(self.logger, core::concat!(
"Liquidity from {} to {} via {} is in the range ({}, {}).\n",
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, self.decay_params);
+ let dir_liq = liq.as_directed(source, target, amt);
return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()));
}
}
/// in the top and bottom bucket, and roughly with similar (recent) frequency.
///
/// Because the datapoints are decayed slowly over time, values will eventually return to
- /// `Some(([1; 32], [1; 32]))` and then to `None` once no datapoints remain.
+ /// `Some(([0; 32], [0; 32]))` or `None` if no data remains for a channel.
///
/// In order to fetch a single success probability from the buckets provided here, as used in
/// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, amt, self.decay_params);
+ let dir_liq = liq.as_directed(source, target, amt);
- let (min_buckets, mut max_buckets) =
- dir_liq.liquidity_history.get_decayed_buckets(
- dir_liq.now, *dir_liq.last_updated,
- self.decay_params.historical_no_updates_half_life
- )?;
+ let min_buckets = dir_liq.liquidity_history.min_liquidity_offset_history.buckets;
+ let mut max_buckets = dir_liq.liquidity_history.max_liquidity_offset_history.buckets;
// Note that the liquidity buckets are an offset from the edge, so we inverse
// the max order to get the probabilities from zero.
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let capacity_msat = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, capacity_msat, self.decay_params);
+ let dir_liq = liq.as_directed(source, target, capacity_msat);
return dir_liq.liquidity_history.calculate_success_probability_times_billion(
- dir_liq.now, *dir_liq.last_updated,
- self.decay_params.historical_no_updates_half_life, ¶ms, amount_msat,
- capacity_msat
+ ¶ms, amount_msat, capacity_msat
).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
}
}
}
}
-impl<T: Time> ChannelLiquidity<T> {
- #[inline]
- fn new() -> Self {
+impl ChannelLiquidity {
+ fn new(last_updated: Duration) -> Self {
Self {
min_liquidity_offset_msat: 0,
max_liquidity_offset_msat: 0,
min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
- last_updated: T::now(),
+ last_updated,
+ offset_history_last_updated: last_updated,
}
}
/// Returns a view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
fn as_directed(
- &self, source: &NodeId, target: &NodeId, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
- ) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, T, &T> {
+ &self, source: &NodeId, target: &NodeId, capacity_msat: u64,
+ ) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, &Duration> {
let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
if source < target {
(&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat,
},
capacity_msat,
last_updated: &self.last_updated,
- now: T::now(),
- decay_params: decay_params,
+ offset_history_last_updated: &self.offset_history_last_updated,
}
}
/// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
fn as_directed_mut(
- &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
- ) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
+ &mut self, source: &NodeId, target: &NodeId, capacity_msat: u64,
+ ) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, &mut Duration> {
let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
if source < target {
(&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat,
},
capacity_msat,
last_updated: &mut self.last_updated,
- now: T::now(),
- decay_params: decay_params,
+ offset_history_last_updated: &mut self.offset_history_last_updated,
+ }
+ }
+
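+ /// Returns `offset` decayed by the liquidity-offset half-life, i.e.
+ /// `offset * 0.5^(elapsed_since_last_update / half_life)`, treating a zero half-life as an
+ /// immediate decay to zero.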
+ fn decayed_offset(
+ &self, offset: u64, duration_since_epoch: Duration,
+ decay_params: ProbabilisticScoringDecayParameters,
+ ) -> u64 {
+ let half_life = decay_params.liquidity_offset_half_life.as_secs_f64();
+ if half_life != 0.0 {
+ let elapsed_time = duration_since_epoch.saturating_sub(self.last_updated).as_secs_f64();
+ ((offset as f64) * powf64(0.5, elapsed_time / half_life)) as u64
+ } else {
+ 0
}
}
}
(numerator, denominator)
}
-impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity< L, BRT, T, U> {
+impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Deref<Target = Duration>>
+DirectedChannelLiquidity<L, BRT, T> {
/// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
/// this direction.
fn penalty_msat(&self, amount_msat: u64, score_params: &ProbabilisticScoringFeeParameters) -> u64 {
if score_params.historical_liquidity_penalty_multiplier_msat != 0 ||
score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
if let Some(cumulative_success_prob_times_billion) = self.liquidity_history
- .calculate_success_probability_times_billion(self.now, *self.last_updated,
- self.decay_params.historical_no_updates_half_life, score_params, amount_msat,
- self.capacity_msat)
+ .calculate_success_probability_times_billion(
+ score_params, amount_msat, self.capacity_msat)
{
let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
/// Returns the lower bound of the channel liquidity balance in this direction.
#[inline(always)]
fn min_liquidity_msat(&self) -> u64 {
- self.decayed_offset_msat(*self.min_liquidity_offset_msat)
+ *self.min_liquidity_offset_msat
}
/// Returns the upper bound of the channel liquidity balance in this direction.
#[inline(always)]
fn max_liquidity_msat(&self) -> u64 {
self.capacity_msat
- .saturating_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat))
- }
-
- fn decayed_offset_msat(&self, offset_msat: u64) -> u64 {
- let half_life = self.decay_params.liquidity_offset_half_life.as_secs();
- if half_life != 0 {
- // Decay the offset by the appropriate number of half lives. If half of the next half
- // life has passed, approximate an additional three-quarter life to help smooth out the
- // decay.
- let elapsed_time = self.now.duration_since(*self.last_updated).as_secs();
- let half_decays = elapsed_time / (half_life / 2);
- let decays = half_decays / 2;
- let decayed_offset_msat = offset_msat.checked_shr(decays as u32).unwrap_or(0);
- if half_decays % 2 == 0 {
- decayed_offset_msat
- } else {
- // 11_585 / 16_384 ~= core::f64::consts::FRAC_1_SQRT_2
- // 16_384 == 2^14
- (decayed_offset_msat as u128 * 11_585 / 16_384) as u64
- }
- } else {
- 0
- }
+ .saturating_sub(*self.max_liquidity_offset_msat)
}
}
-impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, BRT, T, U> {
+impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: DerefMut<Target = Duration>>
+DirectedChannelLiquidity<L, BRT, T> {
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
- fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
+ fn failed_at_channel<Log: Deref>(
+ &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
+ ) where Log::Target: Logger {
let existing_max_msat = self.max_liquidity_msat();
if amount_msat < existing_max_msat {
log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat);
- self.set_max_liquidity_msat(amount_msat);
+ self.set_max_liquidity_msat(amount_msat, duration_since_epoch);
} else {
log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
chan_descr, existing_max_msat, amount_msat);
}
- self.update_history_buckets(0);
+ self.update_history_buckets(0, duration_since_epoch);
}
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
- fn failed_downstream<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
+ fn failed_downstream<Log: Deref>(
+ &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
+ ) where Log::Target: Logger {
let existing_min_msat = self.min_liquidity_msat();
if amount_msat > existing_min_msat {
log_debug!(logger, "Setting min liquidity of {} from {} to {}", existing_min_msat, chan_descr, amount_msat);
- self.set_min_liquidity_msat(amount_msat);
+ self.set_min_liquidity_msat(amount_msat, duration_since_epoch);
} else {
log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
chan_descr, existing_min_msat, amount_msat);
}
- self.update_history_buckets(0);
+ self.update_history_buckets(0, duration_since_epoch);
}
/// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
- fn successful<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
+ fn successful<Log: Deref>(
+ &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
+ ) where Log::Target: Logger {
let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
- self.set_max_liquidity_msat(max_liquidity_msat);
- self.update_history_buckets(amount_msat);
+ self.set_max_liquidity_msat(max_liquidity_msat, duration_since_epoch);
+ self.update_history_buckets(amount_msat, duration_since_epoch);
}
/// Updates the history buckets for this channel. Because the history buckets track what we now
/// know about the channel's state *prior to our payment* (i.e. what we assume is "steady
/// state"), we allow the caller to set an offset applied to our liquidity bounds which
/// represents the amount of the successful payment we just made.
- fn update_history_buckets(&mut self, bucket_offset_msat: u64) {
- let half_lives = self.now.duration_since(*self.last_updated).as_secs()
- .checked_div(self.decay_params.historical_no_updates_half_life.as_secs())
- .map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
- self.liquidity_history.min_liquidity_offset_history.time_decay_data(half_lives);
- self.liquidity_history.max_liquidity_offset_history.time_decay_data(half_lives);
-
- let min_liquidity_offset_msat = self.decayed_offset_msat(*self.min_liquidity_offset_msat);
+ fn update_history_buckets(&mut self, bucket_offset_msat: u64, duration_since_epoch: Duration) {
self.liquidity_history.min_liquidity_offset_history.track_datapoint(
- min_liquidity_offset_msat + bucket_offset_msat, self.capacity_msat
+ *self.min_liquidity_offset_msat + bucket_offset_msat, self.capacity_msat
);
- let max_liquidity_offset_msat = self.decayed_offset_msat(*self.max_liquidity_offset_msat);
self.liquidity_history.max_liquidity_offset_history.track_datapoint(
- max_liquidity_offset_msat.saturating_sub(bucket_offset_msat), self.capacity_msat
+ self.max_liquidity_offset_msat.saturating_sub(bucket_offset_msat), self.capacity_msat
);
+ *self.offset_history_last_updated = duration_since_epoch;
}
/// Adjusts the lower bound of the channel liquidity balance in this direction.
- fn set_min_liquidity_msat(&mut self, amount_msat: u64) {
+ fn set_min_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
*self.min_liquidity_offset_msat = amount_msat;
- *self.max_liquidity_offset_msat = if amount_msat > self.max_liquidity_msat() {
- 0
- } else {
- self.decayed_offset_msat(*self.max_liquidity_offset_msat)
- };
- *self.last_updated = self.now;
+ if amount_msat > self.max_liquidity_msat() {
+ *self.max_liquidity_offset_msat = 0;
+ }
+ *self.last_updated = duration_since_epoch;
}
/// Adjusts the upper bound of the channel liquidity balance in this direction.
- fn set_max_liquidity_msat(&mut self, amount_msat: u64) {
+ fn set_max_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
*self.max_liquidity_offset_msat = self.capacity_msat.checked_sub(amount_msat).unwrap_or(0);
- *self.min_liquidity_offset_msat = if amount_msat < self.min_liquidity_msat() {
- 0
- } else {
- self.decayed_offset_msat(*self.min_liquidity_offset_msat)
- };
- *self.last_updated = self.now;
+ if amount_msat < *self.min_liquidity_offset_msat {
+ *self.min_liquidity_offset_msat = 0;
+ }
+ *self.last_updated = duration_since_epoch;
}
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreLookUp for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreLookUp for ProbabilisticScorer<G, L> where L::Target: Logger {
type ScoreParams = ProbabilisticScoringFeeParameters;
fn channel_penalty_msat(
&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
let capacity_msat = usage.effective_capacity.as_msat();
self.channel_liquidities
.get(&scid)
- .unwrap_or(&ChannelLiquidity::new())
- .as_directed(&source, &target, capacity_msat, self.decay_params)
+ .unwrap_or(&ChannelLiquidity::new(Duration::ZERO))
+ .as_directed(&source, &target, capacity_msat)
.penalty_msat(amount_msat, score_params)
.saturating_add(anti_probing_penalty_msat)
.saturating_add(base_penalty_msat)
}
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
- fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreUpdate for ProbabilisticScorer<G, L> where L::Target: Logger {
+ fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
let amount_msat = path.final_value_msat();
log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
let network_graph = self.network_graph.read_only();
if at_failed_channel {
self.channel_liquidities
.entry(hop.short_channel_id)
- .or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, self.decay_params)
- .failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+ .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
+ .as_directed_mut(source, &target, capacity_msat)
+ .failed_at_channel(amount_msat, duration_since_epoch,
+ format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
self.channel_liquidities
.entry(hop.short_channel_id)
- .or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, self.decay_params)
- .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+ .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
+ .as_directed_mut(source, &target, capacity_msat)
+ .failed_downstream(amount_msat, duration_since_epoch,
+ format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
}
} else {
log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
}
}
- fn payment_path_successful(&mut self, path: &Path) {
+ fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
let amount_msat = path.final_value_msat();
log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.",
path.hops.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat);
let capacity_msat = channel.effective_capacity().as_msat();
self.channel_liquidities
.entry(hop.short_channel_id)
- .or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, capacity_msat, self.decay_params)
- .successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+ .or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
+ .as_directed_mut(source, &target, capacity_msat)
+ .successful(amount_msat, duration_since_epoch,
+ format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
hop.short_channel_id);
}
}
- fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
- self.payment_path_failed(path, short_channel_id)
+ fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
+ self.payment_path_failed(path, short_channel_id, duration_since_epoch)
+ }
+
+ fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
+ self.payment_path_failed(path, u64::max_value(), duration_since_epoch)
}
- fn probe_successful(&mut self, path: &Path) {
- self.payment_path_failed(path, u64::max_value())
+ fn time_passed(&mut self, duration_since_epoch: Duration) {
+ let decay_params = self.decay_params;
+ self.channel_liquidities.retain(|_scid, liquidity| {
+ liquidity.min_liquidity_offset_msat =
+ liquidity.decayed_offset(liquidity.min_liquidity_offset_msat, duration_since_epoch, decay_params);
+ liquidity.max_liquidity_offset_msat =
+ liquidity.decayed_offset(liquidity.max_liquidity_offset_msat, duration_since_epoch, decay_params);
+ liquidity.last_updated = duration_since_epoch;
+
+ let elapsed_time =
+ duration_since_epoch.saturating_sub(liquidity.offset_history_last_updated);
+ if elapsed_time > decay_params.historical_no_updates_half_life {
+ let half_life = decay_params.historical_no_updates_half_life.as_secs_f64();
+ if half_life != 0.0 {
+ // Scale each bucket by 1024 / 2048^(elapsed / half_life). This branch only
+ // runs once `elapsed_time` exceeds a full half-life and then resets the
+ // timestamp, so with regular `time_passed` calls each trigger roughly
+ // halves the buckets.
+ let divisor = powf64(2048.0, elapsed_time.as_secs_f64() / half_life) as u64;
+ for bucket in liquidity.min_liquidity_offset_history.buckets.iter_mut() {
+ *bucket = ((*bucket as u64) * 1024 / divisor) as u16;
+ }
+ for bucket in liquidity.max_liquidity_offset_history.buckets.iter_mut() {
+ *bucket = ((*bucket as u64) * 1024 / divisor) as u16;
+ }
+ liquidity.offset_history_last_updated = duration_since_epoch;
+ }
+ }
+ liquidity.min_liquidity_offset_msat != 0 || liquidity.max_liquidity_offset_msat != 0 ||
+ liquidity.min_liquidity_offset_history.buckets != [0; 32] ||
+ liquidity.max_liquidity_offset_history.buckets != [0; 32]
+ });
}
}
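Decay is now driven entirely by explicit `time_passed` calls rather than an internal clock. A minimal caller-side sketch of how one might drive it (assumes `std`; `advance_scorer_time` is a hypothetical helper, not part of this diff):

use std::time::{SystemTime, UNIX_EPOCH};
use lightning::routing::scoring::ScoreUpdate;

// Run periodically (e.g. once a minute) so liquidity bounds and
// historical buckets keep decaying even when no payments are made.
fn advance_scorer_time<S: ScoreUpdate>(scorer: &mut S) {
	let now = SystemTime::now().duration_since(UNIX_EPOCH).expect("time went backwards");
	scorer.time_passed(now);
}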
#[cfg(c_bindings)]
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T>
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Score for ProbabilisticScorer<G, L>
where L::Target: Logger {}
+#[cfg(feature = "std")]
+#[inline]
+fn powf64(n: f64, exp: f64) -> f64 {
+ n.powf(exp)
+}
+#[cfg(not(feature = "std"))]
+fn powf64(n: f64, exp: f64) -> f64 {
+ libm::powf(n as f32, exp as f32) as f64
+}
+
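The no-std fallback evaluates the power at `f32` precision, which is plenty for a decay multiplier. A quick illustration (assumes the `libm` crate, which the no-std build already pulls in for this fallback):

fn main() {
	let exact = 0.5f64.powf(1.5);
	let approx = libm::powf(0.5f32, 1.5f32) as f64;
	// f32 carries roughly seven significant digits.
	assert!((exact - approx).abs() < 1e-6);
}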
mod approx {
const BITS: u32 = 64;
const HIGHEST_BIT: u32 = BITS - 1;
/// in each of 32 buckets.
#[derive(Clone, Copy)]
pub(super) struct HistoricalBucketRangeTracker {
- buckets: [u16; 32],
+ pub(super) buckets: [u16; 32],
}
/// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
}
}
- /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
- /// datapoints as we receive newer information.
- #[inline]
- pub(super) fn time_decay_data(&mut self, half_lives: u32) {
- for e in self.buckets.iter_mut() {
- *e = e.checked_shr(half_lives).unwrap_or(0);
- }
- }
}
impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
}
impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
- pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
- -> Option<([u16; 32], [u16; 32])> {
- let (_, required_decays) = self.get_total_valid_points(now, last_updated, half_life)?;
-
- let mut min_buckets = *self.min_liquidity_offset_history;
- min_buckets.time_decay_data(required_decays);
- let mut max_buckets = *self.max_liquidity_offset_history;
- max_buckets.time_decay_data(required_decays);
- Some((min_buckets.buckets, max_buckets.buckets))
- }
#[inline]
- pub(super) fn get_total_valid_points<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
- -> Option<(u64, u32)> {
- let required_decays = now.duration_since(last_updated).as_secs()
- .checked_div(half_life.as_secs())
- .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+ pub(super) fn calculate_success_probability_times_billion(
+ &self, params: &ProbabilisticScoringFeeParameters, amount_msat: u64,
+ capacity_msat: u64
+ ) -> Option<u64> {
+ // If historical penalties are enabled, we try to calculate a probability of success
+ // given our historical distribution of min- and max-liquidity bounds in a channel.
+ // To do so, we walk the set of historical liquidity bucket (min, max) combinations
+ // (where min_idx < max_idx, as having a minimum above our maximum is an invalid
+ // state). For each pair, we calculate the probability as if the bucket's corresponding
+ // min- and max- liquidity bounds were our current liquidity bounds and then multiply
+ // that probability by the weight of the selected buckets.
+ let payment_pos = amount_to_pos(amount_msat, capacity_msat);
+ if payment_pos >= POSITION_TICKS { return None; }
let mut total_valid_points_tracked = 0;
for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme),
// treat it as if we were fully decayed.
const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;
- if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < FULLY_DECAYED.into() {
+ if total_valid_points_tracked < FULLY_DECAYED.into() {
return None;
}
- Some((total_valid_points_tracked, required_decays))
- }
-
- #[inline]
- pub(super) fn calculate_success_probability_times_billion<T: Time>(
- &self, now: T, last_updated: T, half_life: Duration,
- params: &ProbabilisticScoringFeeParameters, amount_msat: u64, capacity_msat: u64
- ) -> Option<u64> {
- // If historical penalties are enabled, we try to calculate a probability of success
- // given our historical distribution of min- and max-liquidity bounds in a channel.
- // To do so, we walk the set of historical liquidity bucket (min, max) combinations
- // (where min_idx < max_idx, as having a minimum above our maximum is an invalid
- // state). For each pair, we calculate the probability as if the bucket's corresponding
- // min- and max- liquidity bounds were our current liquidity bounds and then multiply
- // that probability by the weight of the selected buckets.
- let payment_pos = amount_to_pos(amount_msat, capacity_msat);
- if payment_pos >= POSITION_TICKS { return None; }
-
- // Check if all our buckets are zero, once decayed and treat it as if we had no data. We
- // don't actually use the decayed buckets, though, as that would lose precision.
- let (total_valid_points_tracked, _)
- = self.get_total_valid_points(now, last_updated, half_life)?;
-
let mut cumulative_success_prob_times_billion = 0;
// Special-case the 0th min bucket - it generally means we failed a payment, so only
// consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
}
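For context on the `FULLY_DECAYED` cutoff above (a restatement of existing constants, not new logic): bucket weights carry a 5-bit fractional part, so 1.0 is stored as 32, and a (min, max) bucket pair of unit weights contributes 32 * 32 valid points.

const BUCKET_FIXED_POINT_ONE: u16 = 32; // 1.0 in 5-bit fixed point
const FULLY_DECAYED: u16 = BUCKET_FIXED_POINT_ONE * BUCKET_FIXED_POINT_ONE;

fn main() {
	// Fewer total valid points than "1.0 squared" is treated as having no data.
	assert_eq!(FULLY_DECAYED, 1024);
}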
use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Writeable for ProbabilisticScorer<G, L> where L::Target: Logger {
#[inline]
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
write_tlv_fields!(w, {
}
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
-ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref>
+ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorer<G, L> where L::Target: Logger {
#[inline]
fn read<R: Read>(
r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
}
}
-impl<T: Time> Writeable for ChannelLiquidity<T> {
+impl Writeable for ChannelLiquidity {
#[inline]
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
- let duration_since_epoch = T::duration_since_epoch() - self.last_updated.elapsed();
write_tlv_fields!(w, {
(0, self.min_liquidity_offset_msat, required),
// 1 was the min_liquidity_offset_history in octile form
(2, self.max_liquidity_offset_msat, required),
// 3 was the max_liquidity_offset_history in octile form
- (4, duration_since_epoch, required),
+ (4, self.last_updated, required),
(5, Some(self.min_liquidity_offset_history), option),
(7, Some(self.max_liquidity_offset_history), option),
+ (9, self.offset_history_last_updated, required),
});
Ok(())
}
}
-impl<T: Time> Readable for ChannelLiquidity<T> {
+impl Readable for ChannelLiquidity {
#[inline]
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let mut min_liquidity_offset_msat = 0;
let mut legacy_max_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
let mut min_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
let mut max_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
- let mut duration_since_epoch = Duration::from_secs(0);
+ let mut last_updated = Duration::from_secs(0);
+ let mut offset_history_last_updated = None;
read_tlv_fields!(r, {
(0, min_liquidity_offset_msat, required),
(1, legacy_min_liq_offset_history, option),
(2, max_liquidity_offset_msat, required),
(3, legacy_max_liq_offset_history, option),
- (4, duration_since_epoch, required),
+ (4, last_updated, required),
(5, min_liquidity_offset_history, option),
(7, max_liquidity_offset_history, option),
+ (9, offset_history_last_updated, option),
});
- // On rust prior to 1.60 `Instant::duration_since` will panic if time goes backwards.
- // We write `last_updated` as wallclock time even though its ultimately an `Instant` (which
- // is a time from a monotonic clock usually represented as an offset against boot time).
- // Thus, we have to construct an `Instant` by subtracting the difference in wallclock time
- // from the one that was written. However, because `Instant` can panic if we construct one
- // in the future, we must handle wallclock time jumping backwards, which we do by simply
- // using `Instant::now()` in that case.
- let wall_clock_now = T::duration_since_epoch();
- let now = T::now();
- let last_updated = if wall_clock_now > duration_since_epoch {
- now - (wall_clock_now - duration_since_epoch)
- } else { now };
+
if min_liquidity_offset_history.is_none() {
if let Some(legacy_buckets) = legacy_min_liq_offset_history {
min_liquidity_offset_history = Some(legacy_buckets.into_current());
min_liquidity_offset_history: min_liquidity_offset_history.unwrap(),
max_liquidity_offset_history: max_liquidity_offset_history.unwrap(),
last_updated,
+ offset_history_last_updated: offset_history_last_updated.unwrap_or(last_updated),
})
}
}
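Because both timestamps are now plain `Duration`s since the UNIX epoch, a scorer round-trips through serialization with no wall-clock/monotonic conversion. A sketch mirroring the tests below, with `scorer`, `decay_params`, `network_graph`, and `logger` assumed in scope:

let mut bytes = Vec::new();
scorer.write(&mut bytes).unwrap();
let mut reader = io::Cursor::new(&bytes);
let reloaded = <ProbabilisticScorer<_, _>>::read(
	&mut reader, (decay_params, &network_graph, &logger)
).unwrap();
// Both copies then decay identically when fed the same `time_passed` calls.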
#[cfg(test)]
mod tests {
- use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorerUsingTime};
+ use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorer};
use crate::blinded_path::{BlindedHop, BlindedPath};
use crate::util::config::UserConfig;
- use crate::util::time::Time;
- use crate::util::time::tests::SinceEpoch;
use crate::ln::channelmanager;
use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
// `ProbabilisticScorer` tests
- /// A probabilistic scorer for testing with time that can be manually advanced.
- type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph<&'a TestLogger>, &'a TestLogger, SinceEpoch>;
-
fn sender_privkey() -> SecretKey {
SecretKey::from_slice(&[41; 32]).unwrap()
}
#[test]
fn liquidity_bounds_directed_from_lowest_node_id() {
let logger = TestLogger::new();
- let last_updated = SinceEpoch::now();
+ let last_updated = Duration::ZERO;
+ let offset_history_last_updated = Duration::ZERO;
let network_graph = network_graph(&logger);
let decay_params = ProbabilisticScoringDecayParameters::default();
let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
+ min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
+ last_updated, offset_history_last_updated,
min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
})
.with_channel(43,
ChannelLiquidity {
- min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
+ min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
+ last_updated, offset_history_last_updated,
min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
// Update minimum liquidity.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, decay_params)
- .set_min_liquidity_msat(200);
+ .as_directed_mut(&source, &target, 1_000)
+ .set_min_liquidity_msat(200, Duration::ZERO);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 800);
// Update maximum liquidity.
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 1_000, decay_params);
+ .as_directed(&target, &recipient, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 1_000, decay_params);
+ .as_directed(&recipient, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
scorer.channel_liquidities.get_mut(&43).unwrap()
- .as_directed_mut(&target, &recipient, 1_000, decay_params)
- .set_max_liquidity_msat(200);
+ .as_directed_mut(&target, &recipient, 1_000)
+ .set_max_liquidity_msat(200, Duration::ZERO);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 1_000, decay_params);
+ .as_directed(&target, &recipient, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 200);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 1_000, decay_params);
+ .as_directed(&recipient, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 800);
assert_eq!(liquidity.max_liquidity_msat(), 1000);
}
#[test]
fn resets_liquidity_upper_bound_when_crossed_by_lower_bound() {
let logger = TestLogger::new();
- let last_updated = SinceEpoch::now();
+ let last_updated = Duration::ZERO;
+ let offset_history_last_updated = Duration::ZERO;
let network_graph = network_graph(&logger);
let decay_params = ProbabilisticScoringDecayParameters::default();
let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
+ min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
+ last_updated, offset_history_last_updated,
min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
// Check initial bounds.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, decay_params)
- .set_min_liquidity_msat(900);
+ .as_directed_mut(&source, &target, 1_000)
+ .set_min_liquidity_msat(900, Duration::ZERO);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 900);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 100);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 1_000, decay_params)
- .set_min_liquidity_msat(400);
+ .as_directed_mut(&target, &source, 1_000)
+ .set_min_liquidity_msat(400, Duration::ZERO);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
}
#[test]
fn resets_liquidity_lower_bound_when_crossed_by_upper_bound() {
let logger = TestLogger::new();
- let last_updated = SinceEpoch::now();
+ let last_updated = Duration::ZERO;
+ let offset_history_last_updated = Duration::ZERO;
let network_graph = network_graph(&logger);
let decay_params = ProbabilisticScoringDecayParameters::default();
let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
+ min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
+ last_updated, offset_history_last_updated,
min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
// Check initial bounds.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 1_000, decay_params)
- .set_max_liquidity_msat(300);
+ .as_directed_mut(&source, &target, 1_000)
+ .set_max_liquidity_msat(300, Duration::ZERO);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 1_000, decay_params)
- .set_max_liquidity_msat(600);
+ .as_directed_mut(&target, &source, 1_000)
+ .set_max_liquidity_msat(600, Duration::ZERO);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 1_000, decay_params);
+ .as_directed(&target, &source, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
}
#[test]
fn constant_penalty_outside_liquidity_bounds() {
let logger = TestLogger::new();
- let last_updated = SinceEpoch::now();
+ let last_updated = Duration::ZERO;
+ let offset_history_last_updated = Duration::ZERO;
let network_graph = network_graph(&logger);
let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
- min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated,
+ min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40,
+ last_updated, offset_history_last_updated,
min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
});
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 301);
- scorer.payment_path_failed(&failed_path, 41);
+ scorer.payment_path_failed(&failed_path, 41, Duration::ZERO);
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 301);
- scorer.payment_path_successful(&successful_path);
+ scorer.payment_path_successful(&successful_path, Duration::ZERO);
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 301);
}
let usage = ChannelUsage { amount_msat: 750, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 602);
- scorer.payment_path_failed(&path, 43);
+ scorer.payment_path_failed(&path, 43, Duration::ZERO);
let usage = ChannelUsage { amount_msat: 250, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 750, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 602);
- scorer.payment_path_failed(&path, 42);
+ scorer.payment_path_failed(&path, 42, Duration::ZERO);
let usage = ChannelUsage { amount_msat: 250, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
};
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 128);
- scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43);
+ scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43, Duration::ZERO);
let channel = network_graph.read_only().channel(42).unwrap().to_owned();
let (info, _) = channel.as_directed_from(&node_a).unwrap();
assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, ¶ms), 128);
assert_eq!(scorer.channel_penalty_msat(&candidate_43, usage, ¶ms), 128);
- scorer.payment_path_successful(&payment_path_for_amount(500));
+ scorer.payment_path_successful(&payment_path_for_amount(500), Duration::ZERO);
assert_eq!(scorer.channel_penalty_msat(&candidate_41, usage, ¶ms), 128);
assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, ¶ms), 300);
let usage = ChannelUsage { amount_msat: 1_023, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 2_000);
- scorer.payment_path_failed(&payment_path_for_amount(768), 42);
- scorer.payment_path_failed(&payment_path_for_amount(128), 43);
+ scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
+ scorer.payment_path_failed(&payment_path_for_amount(128), 43, Duration::ZERO);
// Initial penalties
let usage = ChannelUsage { amount_msat: 128, ..usage };
let usage = ChannelUsage { amount_msat: 896, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
- // No decay
- SinceEpoch::advance(Duration::from_secs(4));
- let usage = ChannelUsage { amount_msat: 128, ..usage };
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
- let usage = ChannelUsage { amount_msat: 256, ..usage };
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 93);
- let usage = ChannelUsage { amount_msat: 768, ..usage };
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 1_479);
- let usage = ChannelUsage { amount_msat: 896, ..usage };
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
-
// Half decay (i.e., three-quarter life)
- SinceEpoch::advance(Duration::from_secs(1));
+ scorer.time_passed(Duration::from_secs(5));
let usage = ChannelUsage { amount_msat: 128, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 22);
let usage = ChannelUsage { amount_msat: 256, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
// One decay (i.e., half life)
- SinceEpoch::advance(Duration::from_secs(5));
+ scorer.time_passed(Duration::from_secs(10));
let usage = ChannelUsage { amount_msat: 64, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 128, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
// Fully decay liquidity lower bound.
- SinceEpoch::advance(Duration::from_secs(10 * 7));
+ scorer.time_passed(Duration::from_secs(10 * 8));
let usage = ChannelUsage { amount_msat: 0, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 1, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
// Fully decay liquidity upper bound.
- SinceEpoch::advance(Duration::from_secs(10));
+ scorer.time_passed(Duration::from_secs(10 * 9));
let usage = ChannelUsage { amount_msat: 0, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
- SinceEpoch::advance(Duration::from_secs(10));
+ scorer.time_passed(Duration::from_secs(10 * 10));
let usage = ChannelUsage { amount_msat: 0, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
}
- #[test]
- fn decays_liquidity_bounds_without_shift_overflow() {
- let logger = TestLogger::new();
- let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringFeeParameters {
- liquidity_penalty_multiplier_msat: 1_000,
- ..ProbabilisticScoringFeeParameters::zero_penalty()
- };
- let decay_params = ProbabilisticScoringDecayParameters {
- liquidity_offset_half_life: Duration::from_secs(10),
- ..ProbabilisticScoringDecayParameters::default()
- };
- let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
- let source = source_node_id();
- let usage = ChannelUsage {
- amount_msat: 256,
- inflight_htlc_msat: 0,
- effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
- };
- let channel = network_graph.read_only().channel(42).unwrap().to_owned();
- let (info, _) = channel.as_directed_from(&source).unwrap();
- let candidate = CandidateRouteHop::PublicHop {
- info,
- short_channel_id: 42,
- };
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 125);
-
- scorer.payment_path_failed(&payment_path_for_amount(512), 42);
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 281);
-
- // An unchecked right shift 64 bits or more in DirectedChannelLiquidity::decayed_offset_msat
- // would cause an overflow.
- SinceEpoch::advance(Duration::from_secs(10 * 64));
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 125);
-
- SinceEpoch::advance(Duration::from_secs(10));
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 125);
- }
-
#[test]
fn restricts_liquidity_bounds_after_decay() {
let logger = TestLogger::new();
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
// More knowledge gives higher confidence (256, 768), meaning a lower penalty.
- scorer.payment_path_failed(&payment_path_for_amount(768), 42);
- scorer.payment_path_failed(&payment_path_for_amount(256), 43);
+ scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
+ scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::ZERO);
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 281);
// Decaying knowledge gives less confidence (128, 896), meaning a higher penalty.
- SinceEpoch::advance(Duration::from_secs(10));
+ scorer.time_passed(Duration::from_secs(10));
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 291);
// Reducing the upper bound gives more confidence (128, 832) that the payment amount (512)
// is closer to the upper bound, meaning a higher penalty.
- scorer.payment_path_successful(&payment_path_for_amount(64));
+ scorer.payment_path_successful(&payment_path_for_amount(64), Duration::from_secs(10));
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 331);
// Increasing the lower bound gives more confidence (256, 832) that the payment amount (512)
// is closer to the lower bound, meaning a lower penalty.
- scorer.payment_path_failed(&payment_path_for_amount(256), 43);
+ scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::from_secs(10));
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 245);
// Further decaying affects the lower bound more than the upper bound (128, 928).
- SinceEpoch::advance(Duration::from_secs(10));
+ scorer.time_passed(Duration::from_secs(20));
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 280);
}
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
};
- scorer.payment_path_failed(&payment_path_for_amount(500), 42);
+ scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
let channel = network_graph.read_only().channel(42).unwrap().to_owned();
let (info, _) = channel.as_directed_from(&source).unwrap();
let candidate = CandidateRouteHop::PublicHop {
};
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
- SinceEpoch::advance(Duration::from_secs(10));
+ scorer.time_passed(Duration::from_secs(10));
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 473);
- scorer.payment_path_failed(&payment_path_for_amount(250), 43);
+ scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
let mut serialized_scorer = Vec::new();
let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
let deserialized_scorer =
- <ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+ <ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
}
- #[test]
- fn decays_persisted_liquidity_bounds() {
+ fn do_decays_persisted_liquidity_bounds(decay_before_reload: bool) {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
let params = ProbabilisticScoringFeeParameters {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
};
- scorer.payment_path_failed(&payment_path_for_amount(500), 42);
+ scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
let channel = network_graph.read_only().channel(42).unwrap().to_owned();
let (info, _) = channel.as_directed_from(&source).unwrap();
let candidate = CandidateRouteHop::PublicHop {
};
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), u64::max_value());
+ if decay_before_reload {
+ scorer.time_passed(Duration::from_secs(10));
+ }
+
let mut serialized_scorer = Vec::new();
scorer.write(&mut serialized_scorer).unwrap();
- SinceEpoch::advance(Duration::from_secs(10));
-
let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
- let deserialized_scorer =
- <ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+ let mut deserialized_scorer =
+ <ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+ if !decay_before_reload {
+ scorer.time_passed(Duration::from_secs(10));
+ deserialized_scorer.time_passed(Duration::from_secs(10));
+ }
assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, ¶ms), 473);
- scorer.payment_path_failed(&payment_path_for_amount(250), 43);
+ scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 300);
- SinceEpoch::advance(Duration::from_secs(10));
+ deserialized_scorer.time_passed(Duration::from_secs(20));
assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, ¶ms), 370);
}
+ #[test]
+ fn decays_persisted_liquidity_bounds() {
+ do_decays_persisted_liquidity_bounds(false);
+ do_decays_persisted_liquidity_bounds(true);
+ }
+
#[test]
fn scores_realistic_payments() {
// Shows the scores of "realistic" sends of 100k sats over channels of 1-10m sats (with a
assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, ¶ms),
None);
- scorer.payment_path_failed(&payment_path_for_amount(1), 42);
+ scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::ZERO);
{
let network_graph = network_graph.read_only();
let channel = network_graph.channel(42).unwrap();
// Even after we tell the scorer we definitely have enough available liquidity, it will
// still remember that there was some failure in the past, and assign a non-0 penalty.
- scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
+ scorer.payment_path_failed(&payment_path_for_amount(1000), 43, Duration::ZERO);
{
let network_graph = network_graph.read_only();
let channel = network_graph.channel(42).unwrap();
// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
// gone), and check that we're back to where we started.
- SinceEpoch::advance(Duration::from_secs(10 * 16));
+ scorer.time_passed(Duration::from_secs(10 * 16));
{
let network_graph = network_graph.read_only();
let channel = network_graph.channel(42).unwrap();
// Once fully decayed we still have data, but it's all-0s. In the future we may remove the
// data entirely instead.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
- None);
+ Some(([0; 32], [0; 32])));
assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1, ¶ms), None);
- let mut usage = ChannelUsage {
+ let usage = ChannelUsage {
amount_msat: 100,
inflight_htlc_msat: 1024,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
};
- scorer.payment_path_failed(&payment_path_for_amount(1), 42);
+ scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::from_secs(10 * 16));
{
let network_graph = network_graph.read_only();
let channel = network_graph.channel(42).unwrap();
};
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 2050);
- usage.inflight_htlc_msat = 0;
- assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms), 866);
let usage = ChannelUsage {
amount_msat: 1,
}
// Advance to decay all liquidity offsets to zero.
- SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
+ scorer.time_passed(Duration::from_secs(10 * (16 + 60 * 60)));
+
+ // Once even the bounds have decayed information about the channel should be removed
+ // entirely.
+ assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+ None);
// Use a path in the opposite direction, which has zero for htlc_maximum_msat. This will
// ensure that the effective capacity is zero to test division-by-zero edge cases.
path_hop(source_pubkey(), 42, 1),
path_hop(sender_pubkey(), 41, 0),
];
- scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 42);
+ scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 42, Duration::from_secs(10 * (16 + 60 * 60)));
}
#[test]
// final value is taken into account.
assert!(scorer.channel_liquidities.get(&42).is_none());
- scorer.payment_path_failed(&path, 42);
+ scorer.payment_path_failed(&path, 42, Duration::ZERO);
path.blinded_tail.as_mut().unwrap().final_value_msat = 256;
- scorer.payment_path_failed(&path, 43);
+ scorer.payment_path_failed(&path, 43, Duration::ZERO);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 1_000, decay_params);
+ .as_directed(&source, &target, 1_000);
assert_eq!(liquidity.min_liquidity_msat(), 256);
assert_eq!(liquidity.max_liquidity_msat(), 768);
}
None);
// Fail to pay once, and then check the buckets and penalty.
- scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+ scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
// The penalty should be the maximum penalty, as the payment we're scoring is now in the
// same bucket which is the only maximum datapoint.
assert_eq!(scorer.channel_penalty_msat(&candidate, usage, ¶ms),
// ...but once we see a failure, we consider the payment to be substantially less likely,
// even though not a probability of zero as we still look at the second max bucket which
// now shows 31.
- scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42);
+ scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
Some(0.0));
}
}
+
+#[cfg(ldk_bench)]
+pub mod benches {
+ use super::*;
+ use criterion::Criterion;
+ use crate::routing::router::{bench_utils, RouteHop};
+ use crate::util::test_utils::TestLogger;
+ use crate::ln::features::{ChannelFeatures, NodeFeatures};
+
+ pub fn decay_100k_channel_bounds(bench: &mut Criterion) {
+ let logger = TestLogger::new();
+ let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+ let mut scorer = ProbabilisticScorer::new(Default::default(), &network_graph, &logger);
+ // Score a number of random channels
+ let mut seed: u64 = 0xdeadbeef;
+ for _ in 0..100_000 {
+ seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+ let (victim, victim_dst, amt) = {
+ let rong = network_graph.read_only();
+ let channels = rong.channels();
+ let chan = channels.unordered_iter()
+ .nth((seed as usize) % channels.len()).unwrap();
+ seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+ let amt = seed % chan.1.capacity_sats.map(|c| c * 1000)
+ .or(chan.1.one_to_two.as_ref().map(|info| info.htlc_maximum_msat))
+ .or(chan.1.two_to_one.as_ref().map(|info| info.htlc_maximum_msat))
+ .unwrap_or(1_000_000_000).saturating_add(1);
+ (*chan.0, chan.1.node_two, amt)
+ };
+ let path = Path {
+ hops: vec![RouteHop {
+ pubkey: victim_dst.as_pubkey().unwrap(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: victim,
+ channel_features: ChannelFeatures::empty(),
+ fee_msat: amt,
+ cltv_expiry_delta: 42,
+ maybe_announced_channel: true,
+ }],
+ blinded_tail: None
+ };
+ seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+ if seed % 2 == 0 {
+ scorer.probe_failed(&path, victim, Duration::ZERO);
+ } else {
+ scorer.probe_successful(&path, Duration::ZERO);
+ }
+ }
+ let mut cur_time = Duration::ZERO;
+ cur_time += Duration::from_millis(1);
+ scorer.time_passed(cur_time);
+ bench.bench_function("decay_100k_channel_bounds", |b| b.iter(|| {
+ cur_time += Duration::from_millis(1);
+ scorer.time_passed(cur_time);
+ }));
+ }
+}
///
/// # Pruning stale channel updates
///
-/// Stale updates are pruned when a full monitor is written. The old monitor is first read, and if
-/// that succeeds, updates in the range between the old and new monitors are deleted. The `lazy`
-/// flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
+/// Stale updates are pruned when the consolidation threshold is reached, as configured by
+/// `maximum_pending_updates`: monitor updates in the range between the latest `update_id` and
+/// `update_id - maximum_pending_updates` are deleted.
+/// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
/// will complete. However, stale updates are not a problem for data integrity, since updates are
/// only read that are higher than the stored [`ChannelMonitor`]'s `update_id`.
///
) -> chain::ChannelMonitorUpdateStatus {
// Determine the proper key for this monitor
let monitor_name = MonitorName::from(funding_txo);
- let maybe_old_monitor = self.read_monitor(&monitor_name);
- match maybe_old_monitor {
- Ok((_, ref old_monitor)) => {
- // Check that this key isn't already storing a monitor with a higher update_id
- // (collision)
- if old_monitor.get_latest_update_id() > monitor.get_latest_update_id() {
- log_error!(
- self.logger,
- "Tried to write a monitor at the same outpoint {} with a higher update_id!",
- monitor_name.as_str()
- );
- return chain::ChannelMonitorUpdateStatus::UnrecoverableError;
- }
- }
- // This means the channel monitor is new.
- Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
- _ => return chain::ChannelMonitorUpdateStatus::UnrecoverableError,
- }
// Serialize and write the new monitor
let mut monitor_bytes = Vec::with_capacity(
MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
&monitor_bytes,
) {
Ok(_) => {
- // Assess cleanup. Typically, we'll clean up only between the last two known full
- // monitors.
- if let Ok((_, old_monitor)) = maybe_old_monitor {
- let start = old_monitor.get_latest_update_id();
- let end = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
- // We don't want to clean the rest of u64, so just do possible pending
- // updates. Note that we never write updates at
- // `CLOSED_CHANNEL_UPDATE_ID`.
- cmp::min(
- start.saturating_add(self.maximum_pending_updates),
- CLOSED_CHANNEL_UPDATE_ID - 1,
- )
- } else {
- monitor.get_latest_update_id().saturating_sub(1)
- };
- // We should bother cleaning up only if there's at least one update
- // expected.
- for update_id in start..=end {
- let update_name = UpdateName::from(update_id);
- #[cfg(debug_assertions)]
- {
- if let Ok(update) =
- self.read_monitor_update(&monitor_name, &update_name)
- {
- // Assert that we are reading what we think we are.
- debug_assert_eq!(update.update_id, update_name.0);
- } else if update_id != start && monitor.get_latest_update_id() != CLOSED_CHANNEL_UPDATE_ID
- {
- // We're deleting something we should know doesn't exist.
- panic!(
- "failed to read monitor update {}",
- update_name.as_str()
- );
- }
- // On closed channels, we will unavoidably try to read
- // non-existent updates since we have to guess at the range of
- // stale updates, so do nothing.
- }
- if let Err(e) = self.kv_store.remove(
- CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
- monitor_name.as_str(),
- update_name.as_str(),
- true,
- ) {
- log_error!(
- self.logger,
- "error cleaning up channel monitor updates for monitor {}, reason: {}",
- monitor_name.as_str(),
- e
- );
- };
- }
- };
chain::ChannelMonitorUpdateStatus::Completed
}
Err(e) => {
log_error!(
self.logger,
- "error writing channel monitor {}/{}/{} reason: {}",
+ "Failed to write ChannelMonitor {}/{}/{} reason: {}",
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
monitor_name.as_str(),
Err(e) => {
log_error!(
self.logger,
- "error writing channel monitor update {}/{}/{} reason: {}",
+ "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str(),
update_name.as_str(),
}
}
} else {
- // We could write this update, but it meets criteria of our design that call for a full monitor write.
- self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+ let monitor_name = MonitorName::from(funding_txo);
+ // In the case of a channel-close monitor update, we need to read the old monitor before
+ // persisting the new one in order to determine the cleanup range.
+ let maybe_old_monitor = match monitor.get_latest_update_id() {
+ CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
+ _ => None
+ };
+
+ // We could write this update, but it meets the criteria of our design that call for a full monitor write.
+ let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
+
+ if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
+ let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
+ // If there was an error reading the old monitor, we skip the cleanup.
+ maybe_old_monitor.map(|(_, ref old_monitor)| {
+ let start = old_monitor.get_latest_update_id();
+ // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
+ let end = cmp::min(
+ start.saturating_add(self.maximum_pending_updates),
+ CLOSED_CHANNEL_UPDATE_ID - 1,
+ );
+ (start, end)
+ })
+ } else {
+ let end = monitor.get_latest_update_id();
+ let start = end.saturating_sub(self.maximum_pending_updates);
+ Some((start, end))
+ };
+
+ if let Some((start, end)) = cleanup_range {
+ self.cleanup_in_range(monitor_name, start, end);
+ }
+ }
+
+ monitor_update_status
}
} else {
// There is no update given, so we must persist a new monitor.
}
}
+impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
+where
+ ES::Target: EntropySource + Sized,
+ K::Target: KVStore,
+ L::Target: Logger,
+ SP::Target: SignerProvider + Sized
+{
+ // Cleans up monitor updates for the given monitor in the range `start..=end`.
+ fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
+ for update_id in start..=end {
+ let update_name = UpdateName::from(update_id);
+ if let Err(e) = self.kv_store.remove(
+ CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+ monitor_name.as_str(),
+ update_name.as_str(),
+ true,
+ ) {
+ log_error!(
+ self.logger,
+ "Failed to clean up channel monitor updates for monitor {}, reason: {}",
+ monitor_name.as_str(),
+ e
+ );
+ };
+ }
+ }
+}
+
/// A struct representing a name for a monitor.
#[derive(Debug)]
struct MonitorName(String);
#[test]
fn persister_with_real_monitors() {
// This value is used later to limit how many iterations we perform.
- let test_max_pending_updates = 7;
+ let persister_0_max_pending_updates = 7;
+ // Intentionally set this to a smaller value to test a different alignment.
+ let persister_1_max_pending_updates = 3;
let chanmon_cfgs = create_chanmon_cfgs(4);
let persister_0 = MonitorUpdatingPersister {
kv_store: &TestStore::new(false),
logger: &TestLogger::new(),
- maximum_pending_updates: test_max_pending_updates,
+ maximum_pending_updates: persister_0_max_pending_updates,
entropy_source: &chanmon_cfgs[0].keys_manager,
signer_provider: &chanmon_cfgs[0].keys_manager,
};
let persister_1 = MonitorUpdatingPersister {
kv_store: &TestStore::new(false),
logger: &TestLogger::new(),
- // Intentionally set this to a smaller value to test a different alignment.
- maximum_pending_updates: 3,
+ maximum_pending_updates: persister_1_max_pending_updates,
entropy_source: &chanmon_cfgs[1].keys_manager,
signer_provider: &chanmon_cfgs[1].keys_manager,
};
node_cfgs[1].chain_monitor = chain_mon_1;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
for (_, mon) in persisted_chan_data_0.iter() {
// check that when we read it, we got the right update id
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
- // if the CM is at the correct update id without updates, ensure no updates are stored
+
+ // if the CM is at a consolidation threshold, ensure no updates are stored.
let monitor_name = MonitorName::from(mon.get_funding_txo().0);
- let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
- if cm_0.get_latest_update_id() == $expected_update_id {
+ if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
+ || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(
persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str()).unwrap().len(),
for (_, mon) in persisted_chan_data_1.iter() {
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
let monitor_name = MonitorName::from(mon.get_funding_txo().0);
- let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
- if cm_1.get_latest_update_id() == $expected_update_id {
+ // if the CM is at a consolidation threshold, ensure no updates are stored.
+ if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
+ || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(
persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str()).unwrap().len(),
// Send a few more payments to try all the alignments of max pending updates with
// updates for a payment sent and received.
let mut sender = 0;
- for i in 3..=test_max_pending_updates * 2 {
+ for i in 3..=persister_0_max_pending_updates * 2 {
let receiver;
if sender == 0 {
sender = 1;
// You may not use this file except in accordance with one or both of these
// licenses.
+use crate::blinded_path::BlindedPath;
+use crate::blinded_path::payment::ReceiveTlvs;
use crate::chain;
use crate::chain::WatchedOutput;
use crate::chain::chaininterface;
use crate::events;
use crate::events::bump_transaction::{WalletSource, Utxo};
use crate::ln::ChannelId;
-use crate::ln::channelmanager;
+use crate::ln::channelmanager::{ChannelDetails, self};
use crate::ln::chan_utils::CommitmentTransaction;
use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use crate::ln::{msgs, wire};
use crate::ln::msgs::LightningError;
use crate::ln::script::ShutdownScript;
-use crate::offers::invoice::UnsignedBolt12Invoice;
+use crate::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use crate::offers::invoice_request::UnsignedInvoiceRequest;
+use crate::onion_message::{Destination, MessageRouter, OnionMessagePath};
use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::sighash::{SighashCache, EcdsaSighashType};
-use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, self};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
use bitcoin::secp256k1::schnorr;
impl<'a> Router for TestRouter<'a> {
fn find_route(
- &self, payer: &PublicKey, params: &RouteParameters, first_hops: Option<&[&channelmanager::ChannelDetails]>,
+ &self, payer: &PublicKey, params: &RouteParameters, first_hops: Option<&[&ChannelDetails]>,
inflight_htlcs: InFlightHtlcs
) -> Result<Route, msgs::LightningError> {
if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
&[42; 32]
)
}
+
+ fn create_blinded_payment_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
+ _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
+ unreachable!()
+ }
+}
+
+impl<'a> MessageRouter for TestRouter<'a> {
+ fn find_path(
+ &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+ ) -> Result<OnionMessagePath, ()> {
+ unreachable!()
+ }
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
+ _secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ unreachable!()
+ }
}
impl<'a> Drop for TestRouter<'a> {
}
impl ScoreUpdate for TestScorer {
- fn payment_path_failed(&mut self, _actual_path: &Path, _actual_short_channel_id: u64) {}
+ fn payment_path_failed(&mut self, _actual_path: &Path, _actual_short_channel_id: u64, _duration_since_epoch: Duration) {}
- fn payment_path_successful(&mut self, _actual_path: &Path) {}
+ fn payment_path_successful(&mut self, _actual_path: &Path, _duration_since_epoch: Duration) {}
- fn probe_failed(&mut self, _actual_path: &Path, _: u64) {}
+ fn probe_failed(&mut self, _actual_path: &Path, _: u64, _duration_since_epoch: Duration) {}
- fn probe_successful(&mut self, _actual_path: &Path) {}
+ fn probe_successful(&mut self, _actual_path: &Path, _duration_since_epoch: Duration) {}
+
+ fn time_passed(&mut self, _duration_since_epoch: Duration) {}
}
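
A brief hedged note on the signature changes above: each `ScoreUpdate` method now receives a `duration_since_epoch`, and the new `time_passed` hook lets callers drive time-based decay explicitly rather than having scorers read a clock internally. A hypothetical call-site sketch follows (names and wiring are ours, not LDK API):

// Hypothetical caller-side sketch: thread wall-clock time into scorer updates.
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn now_since_epoch() -> Duration {
	SystemTime::now().duration_since(UNIX_EPOCH).expect("system time before Unix epoch")
}

// e.g., on a failed path: scorer.payment_path_failed(&path, scid, now_since_epoch());
// and periodically: scorer.time_passed(now_since_epoch());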
impl Drop for TestScorer {
--- /dev/null
+## Backwards Compatibility
+
+* Nodes that upgrade to 0.0.119 and subsequently downgrade after receiving a payment to a blinded
+  path may lose privacy if one or more of that payment's HTLCs fails.