use utils::test_logger;
+use std::convert::TryInto;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
count => {
for _ in 0..count {
scid += 1;
- let rnid = node_pks.iter().skip(slice_to_be16(get_slice!(2))as usize % node_pks.len()).next().unwrap();
+ let rnid = node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap();
+ let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap());
first_hops_vec.push(ChannelDetails {
channel_id: [0; 32],
counterparty: ChannelCounterparty {
channel_type: None,
short_channel_id: Some(scid),
inbound_scid_alias: None,
- channel_value_satoshis: slice_to_be64(get_slice!(8)),
+ channel_value_satoshis: capacity,
user_channel_id: 0, inbound_capacity_msat: 0,
unspendable_punishment_reserve: None,
confirmations_required: None,
is_outbound: true, is_funding_locked: true,
is_usable: true, is_public: true,
balance_msat: 0,
- outbound_capacity_msat: 0,
+ outbound_capacity_msat: capacity.saturating_mul(1000),
+ next_outbound_htlc_limit_msat: capacity.saturating_mul(1000),
inbound_htlc_minimum_msat: None,
inbound_htlc_maximum_msat: None,
});
let mut have_pruned = false;
loop {
- peer_manager.process_events(); // Note that this may block on ChannelManager's locking
channel_manager.process_pending_events(&event_handler);
chain_monitor.process_pending_events(&event_handler);
+ // Note that the PeerManager::process_events may block on ChannelManager's locks,
+ // hence it comes last here. When the ChannelManager finishes whatever it's doing,
+ // we want to ensure we get into `persist_manager` as quickly as we can, especially
+ // without running the normal event processing above and handing events to users.
+ //
+ // Specifically, on an *extremely* slow machine, we may see ChannelManager start
+ // processing a message effectively at any point during this loop. In order to
+ // minimize the time between such processing completing and persisting the updated
+ // ChannelManager, we want to minimize methods blocking on a ChannelManager
+ // generally, and as a fallback place such blocking only immediately before
+ // persistence.
+ peer_manager.process_events();
+
// We wait up to 100ms, but track how long it takes to detect being put to sleep,
// see `await_start`'s use below.
let await_start = Instant::now();
use crate::{BlockSource, BlockSourceResult, Cache, ChainNotifier};
use crate::poll::{ChainPoller, Validate, ValidatedBlockHeader};
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
struct DynamicChainListener<'a, L: chain::Listen + ?Sized>(&'a L);
impl<'a, L: chain::Listen + ?Sized> chain::Listen for DynamicChainListener<'a, L> {
- fn block_connected(&self, _block: &Block, _height: u32) {
+ fn filtered_block_connected(&self, _header: &BlockHeader, _txdata: &chain::transaction::TransactionData, _height: u32) {
unreachable!()
}
struct ChainListenerSet<'a, L: chain::Listen + ?Sized>(Vec<(u32, &'a L)>);
impl<'a, L: chain::Listen + ?Sized> chain::Listen for ChainListenerSet<'a, L> {
- fn block_connected(&self, block: &Block, height: u32) {
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &chain::transaction::TransactionData, height: u32) {
for (starting_height, chain_listener) in self.0.iter() {
if height > *starting_height {
- chain_listener.block_connected(block, height);
+ chain_listener.filtered_block_connected(header, txdata, height);
}
}
}
+/// A `chain::Listen` implementation which does nothing for any block event. Useful where a
+/// listener is required but no action needs to be taken.
pub struct NullChainListener;
impl chain::Listen for NullChainListener {
- fn block_connected(&self, _block: &Block, _height: u32) {}
+ fn filtered_block_connected(&self, _header: &BlockHeader, _txdata: &chain::transaction::TransactionData, _height: u32) {}
fn block_disconnected(&self, _header: &BlockHeader, _height: u32) {}
}
}
impl chain::Listen for MockChainListener {
- fn block_connected(&self, block: &Block, height: u32) {
+ fn filtered_block_connected(&self, header: &BlockHeader, _txdata: &chain::transaction::TransactionData, height: u32) {
match self.expected_blocks_connected.borrow_mut().pop_front() {
None => {
- panic!("Unexpected block connected: {:?}", block.block_hash());
+ panic!("Unexpected block connected: {:?}", header.block_hash());
},
Some(expected_block) => {
- assert_eq!(block.block_hash(), expected_block.header.block_hash());
+ assert_eq!(header.block_hash(), expected_block.header.block_hash());
assert_eq!(height, expected_block.height);
},
}
//! events. The remote server would make use of [`ChainMonitor`] for block processing and for
//! servicing [`ChannelMonitor`] updates from the client.
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
use bitcoin::hash_types::Txid;
use chain;
L::Target: Logger,
P::Target: Persist<ChannelSigner>,
{
- fn block_connected(&self, block: &Block, height: u32) {
- let header = &block.header;
- let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
monitor.block_connected(
//! security-domain-separated system design, you should consider having multiple paths for
//! ChannelMonitors to get out of the HSM and onto monitoring devices.
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{TxOut,Transaction};
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn block_connected(&self, block: &Block, height: u32) {
- let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
- self.0.block_connected(&block.header, &txdata, height, &*self.1, &*self.2, &*self.3);
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &*self.3);
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
/// sourcing chain data using a block-oriented API should prefer this interface over [`Confirm`].
/// Such clients fetch the entire header chain whereas clients using [`Confirm`] only fetch headers
/// when needed.
+///
+/// By using [`Listen::filtered_block_connected`] this interface supports clients fetching the
+/// entire header chain and only blocks with matching transaction data using BIP 157 filters or
+/// other similar filtering.
pub trait Listen {
+ /// Notifies the listener that a block was added at the given height, with the transaction data
+ /// possibly filtered.
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
+
/// Notifies the listener that a block was added at the given height.
- fn block_connected(&self, block: &Block, height: u32);
+ fn block_connected(&self, block: &Block, height: u32) {
+ let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+ self.filtered_block_connected(&block.header, &txdata, height);
+ }
/// Notifies the listener that a block was removed at the given height.
fn block_disconnected(&self, header: &BlockHeader, height: u32);
}
impl<T: Listen> Listen for core::ops::Deref<Target = T> {
- fn block_connected(&self, block: &Block, height: u32) {
- (**self).block_connected(block, height);
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ (**self).filtered_block_connected(header, txdata, height);
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
T::Target: Listen,
U::Target: Listen,
{
- fn block_connected(&self, block: &Block, height: u32) {
- self.0.block_connected(block, height);
- self.1.block_connected(block, height);
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ self.0.filtered_block_connected(header, txdata, height);
+ self.1.filtered_block_connected(header, txdata, height);
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
pub counterparty_dust_limit_msat: u64,
}
+/// A snapshot of a channel's balances and sending limits, in millisatoshis, as returned by
+/// `Channel::get_available_balances`.
+pub struct AvailableBalances {
+ /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
+ pub balance_msat: u64,
+ /// Total amount available for our counterparty to send to us.
+ pub inbound_capacity_msat: u64,
+ /// Total amount available for us to send to our counterparty.
+ pub outbound_capacity_msat: u64,
+ /// The maximum value we can assign to the next outbound HTLC
+ pub next_outbound_htlc_limit_msat: u64,
+}
+
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
// Inbound states mirroring InboundHTLCState
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
-/// Maximum `funding_satoshis` value, according to the BOLT #2 specification
-/// it's 2^24.
-pub const MAX_FUNDING_SATOSHIS: u64 = 1 << 24;
+/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
+/// `option_support_large_channel` (aka wumbo channels) is not supported.
+/// It's 2^24 - 1.
+pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
+
+/// Total bitcoin supply in satoshis.
+pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
let holder_signer = keys_provider.get_channel_signer(false, channel_value_satoshis);
let pubkeys = holder_signer.pubkeys().clone();
- if channel_value_satoshis >= MAX_FUNDING_SATOSHIS {
- return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than {}, it was {}", MAX_FUNDING_SATOSHIS, channel_value_satoshis)});
+ if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+ }
+ if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
}
let channel_value_msat = channel_value_satoshis * 1000;
if push_msat > channel_value_msat {
}
// Check sanity of message fields:
- if msg.funding_satoshis >= MAX_FUNDING_SATOSHIS {
- return Err(ChannelError::Close(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, msg.funding_satoshis)));
+ if msg.funding_satoshis > config.peer_channel_config_limits.max_funding_satoshis {
+ return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.peer_channel_config_limits.max_funding_satoshis, msg.funding_satoshis)));
+ }
+ if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
}
if msg.channel_reserve_satoshis > msg.funding_satoshis {
return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
stats
}
- /// Get the available (ie not including pending HTLCs) inbound and outbound balance in msat.
+ /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
/// Doesn't bother handling the
/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
/// corner case properly.
- /// The channel reserve is subtracted from each balance.
- /// See also [`Channel::get_balance_msat`]
- pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) {
+ pub fn get_available_balances(&self) -> AvailableBalances {
// Note that we have to handle overflow due to the above case.
- (
- cmp::max(self.channel_value_satoshis as i64 * 1000
- - self.value_to_self_msat as i64
- - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
- - self.holder_selected_channel_reserve_satoshis as i64 * 1000,
- 0) as u64,
- cmp::max(self.value_to_self_msat as i64
- - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
- - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
- 0) as u64
- )
- }
+ let outbound_stats = self.get_outbound_pending_htlc_stats(None);
- /// Get our total balance in msat.
- /// This is the amount that would go to us if we close the channel, ignoring any on-chain fees.
- /// See also [`Channel::get_inbound_outbound_available_balance_msat`]
- pub fn get_balance_msat(&self) -> u64 {
- // Include our local balance, plus any inbound HTLCs we know the preimage for, minus any
- // HTLCs sent or which will be sent after commitment signed's are exchanged.
let mut balance_msat = self.value_to_self_msat;
for ref htlc in self.pending_inbound_htlcs.iter() {
if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
balance_msat += htlc.amount_msat;
}
}
- balance_msat - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat
+ balance_msat -= outbound_stats.pending_htlcs_value_msat;
+
+ let outbound_capacity_msat = cmp::max(self.value_to_self_msat as i64
+ - outbound_stats.pending_htlcs_value_msat as i64
+ - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
+ 0) as u64;
+ AvailableBalances {
+ inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000
+ - self.value_to_self_msat as i64
+ - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
+ - self.holder_selected_channel_reserve_satoshis as i64 * 1000,
+ 0) as u64,
+ outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64,
+ self.counterparty_max_htlc_value_in_flight_msat as i64
+ - outbound_stats.pending_htlcs_value_msat as i64),
+ 0) as u64,
+ balance_msat,
+ }
}
pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() {
return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
}
- if msg.fee_satoshis > 21_000_000 * 1_0000_0000 { //this is required to stop potential overflow in build_closing_transaction
+ if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
}
use ln::PaymentHash;
use ln::channelmanager::{HTLCSource, PaymentId};
use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
- use ln::channel::MAX_FUNDING_SATOSHIS;
+ use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};
use ln::features::InitFeatures;
use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
use ln::script::ShutdownScript;
}
#[test]
- fn test_max_funding_satoshis() {
- assert!(MAX_FUNDING_SATOSHIS <= 21_000_000 * 100_000_000,
- "MAX_FUNDING_SATOSHIS is greater than all satoshis in existence");
+ // Sanity-check the funding-limit constants against the total number of satoshis in existence.
+ fn test_max_funding_satoshis_no_wumbo() {
+ assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
+ assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
+ "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
}
#[test]
//! imply it needs to fail HTLCs/payments/channels it manages).
//!
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
/// should be able to spend nearly this amount.
pub outbound_capacity_msat: u64,
+ /// The available outbound capacity for sending a single HTLC to the remote peer. This is
+ /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
+ /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
+ /// to use a limit as close as possible to the HTLC limit we can currently send.
+ ///
+ /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+ pub next_outbound_htlc_limit_msat: u64,
/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new inbound HTLCs).
///
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
- /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
- ///
/// Users need to notify the new ChannelManager when a new block is connected or
/// disconnected using its `block_connected` and `block_disconnected` methods, starting
/// from after `params.latest_hash`.
let channel_state = self.channel_state.lock().unwrap();
res.reserve(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
- let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
- let balance_msat = channel.get_balance_msat();
+ let balance = channel.get_available_balances();
let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
channel.get_holder_counterparty_selected_channel_reserve_satoshis();
res.push(ChannelDetails {
inbound_scid_alias: channel.latest_inbound_scid_alias(),
channel_value_satoshis: channel.get_value_satoshis(),
unspendable_punishment_reserve: to_self_reserve_satoshis,
- balance_msat,
- inbound_capacity_msat,
- outbound_capacity_msat,
+ balance_msat: balance.balance_msat,
+ inbound_capacity_msat: balance.inbound_capacity_msat,
+ outbound_capacity_msat: balance.outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
user_channel_id: channel.get_user_id(),
confirmations_required: channel.minimum_depth(),
force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn block_connected(&self, block: &Block, height: u32) {
+ fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
{
let best_block = self.best_block.read().unwrap();
- assert_eq!(best_block.block_hash(), block.header.prev_blockhash,
+ assert_eq!(best_block.block_hash(), header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
assert_eq!(best_block.height(), height - 1,
"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
}
- let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
- self.transactions_confirmed(&block.header, &txdata, height);
- self.best_block_updated(&block.header, height);
+ self.transactions_confirmed(header, txdata, height);
+ self.best_block_updated(header, height);
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
(14, user_channel_id, required),
(16, balance_msat, required),
(18, outbound_capacity_msat, required),
+ // Note that by the time we get past the required read above, outbound_capacity_msat will be
+ // filled in, so we can safely unwrap it here.
+ (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())),
(20, inbound_capacity_msat, required),
(22, confirmations_required, option),
(24, force_close_spend_delay, option),
// Byte 1
,
// Byte 2
- BasicMPP,
+ BasicMPP | Wumbo,
// Byte 3
ShutdownAnySegwit,
// Byte 4
// Byte 1
,
// Byte 2
- BasicMPP,
+ BasicMPP | Wumbo,
// Byte 3
ShutdownAnySegwit,
// Byte 4
define_feature!(17, BasicMPP, [InitContext, NodeContext, InvoiceContext],
"Feature flags for `basic_mpp`.", set_basic_mpp_optional, set_basic_mpp_required,
supports_basic_mpp, requires_basic_mpp);
+ define_feature!(19, Wumbo, [InitContext, NodeContext],
+ "Feature flags for `option_support_large_channel` (aka wumbo channels).", set_wumbo_optional, set_wumbo_required,
+ supports_wumbo, requires_wumbo);
define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
"Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
self
}
}
+
+impl<T: sealed::Wumbo> Features<T> {
+ /// Unsets the `wumbo` feature bit. Test-only, used to construct a peer feature set without
+ /// `option_support_large_channel` support.
+ #[cfg(test)]
+ pub(crate) fn clear_wumbo(mut self) -> Self {
+ <T as sealed::Wumbo>::clear_bits(&mut self.flags);
+ self
+ }
+}
+
macro_rules! impl_feature_len_prefixed_write {
($features: ident) => {
impl Writeable for $features {
assert!(!InitFeatures::known().requires_scid_privacy());
assert!(!NodeFeatures::known().requires_scid_privacy());
+ assert!(InitFeatures::known().supports_wumbo());
+ assert!(NodeFeatures::known().supports_wumbo());
+ assert!(!InitFeatures::known().requires_wumbo());
+ assert!(!NodeFeatures::known().requires_wumbo());
+
let mut init_features = InitFeatures::known();
assert!(init_features.initial_routing_sync());
init_features.clear_initial_routing_sync();
// Check that the flags are as expected:
// - option_data_loss_protect
// - var_onion_optin (req) | static_remote_key (req) | payment_secret(req)
- // - basic_mpp
+ // - basic_mpp | wumbo
// - opt_shutdown_anysegwit
// -
// - option_channel_type | option_scid_alias
assert_eq!(node_features.flags.len(), 6);
assert_eq!(node_features.flags[0], 0b00000010);
assert_eq!(node_features.flags[1], 0b01010001);
- assert_eq!(node_features.flags[2], 0b00000010);
+ assert_eq!(node_features.flags[2], 0b00001010);
assert_eq!(node_features.flags[3], 0b00001000);
assert_eq!(node_features.flags[4], 0b00000000);
assert_eq!(node_features.flags[5], 0b10100000);
#[test]
fn test_insane_channel_opens() {
// Stand up a network of 2 nodes
+ use ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+ let mut cfg = UserConfig::default();
+ cfg.peer_channel_config_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Instantiate channel parameters where we push the maximum msats given our
} else { assert!(false); }
};
- use ln::channel::MAX_FUNDING_SATOSHIS;
use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
// Test all mutations that would make the channel open message insane
- insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
+ insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
+ insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
}
+#[test]
+fn test_funding_exceeds_no_wumbo_limit() {
+ // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
+ // them.
+ use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ // Clear the wumbo feature bit on node 1 so that node 0 sees a peer without wumbo support.
+ node_cfgs[1].features = InitFeatures::known().clear_wumbo();
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // One satoshi above the non-wumbo cap must be rejected with an `APIMisuseError`.
+ match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
+ Err(APIError::APIMisuseError { err }) => {
+ assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
+ },
+ _ => panic!()
+ }
+}
+
fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
// A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
// but only for them. Because some LSPs do it with some level of trust of the clients (for a
match type_id {
msgs::ChannelAnnouncement::TYPE |
msgs::ChannelUpdate::TYPE |
- msgs::NodeAnnouncement::TYPE => true,
+ msgs::NodeAnnouncement::TYPE |
+ msgs::QueryChannelRange::TYPE |
+ msgs::ReplyChannelRange::TYPE |
+ msgs::QueryShortChannelIds::TYPE |
+ msgs::ReplyShortChannelIdsEnd::TYPE => true,
_ => false
}
}
fn effective_capacity(&self) -> EffectiveCapacity {
match self {
CandidateRouteHop::FirstHop { details } => EffectiveCapacity::ExactLiquidity {
- liquidity_msat: details.outbound_capacity_msat,
+ liquidity_msat: details.next_outbound_htlc_limit_msat,
},
CandidateRouteHop::PublicHop { info, .. } => info.effective_capacity(),
CandidateRouteHop::PrivateHop { .. } => EffectiveCapacity::Infinite,
// We don't want multiple paths (as per MPP) share liquidity of the same channels.
// This map allows paths to be aware of the channel use by other paths in the same call.
// This would help to make a better path finding decisions and not "overbook" channels.
- // It is unaware of the directions (except for `outbound_capacity_msat` in `first_hops`).
+ // It is unaware of the directions (except for `next_outbound_htlc_limit_msat` in
+ // `first_hops`).
let mut bookkept_channels_liquidity_available_msat = HashMap::with_capacity(network_nodes.len());
// Keeping track of how much value we already collected across other paths. Helps to decide:
// sort channels above `recommended_value_msat` in ascending order, preferring channels
// which have enough, but not too much, capacity for the payment.
channels.sort_unstable_by(|chan_a, chan_b| {
- if chan_b.outbound_capacity_msat < recommended_value_msat || chan_a.outbound_capacity_msat < recommended_value_msat {
+ if chan_b.next_outbound_htlc_limit_msat < recommended_value_msat || chan_a.next_outbound_htlc_limit_msat < recommended_value_msat {
// Sort in descending order
- chan_b.outbound_capacity_msat.cmp(&chan_a.outbound_capacity_msat)
+ chan_b.next_outbound_htlc_limit_msat.cmp(&chan_a.next_outbound_htlc_limit_msat)
} else {
// Sort in ascending order
- chan_a.outbound_capacity_msat.cmp(&chan_b.outbound_capacity_msat)
+ chan_a.next_outbound_htlc_limit_msat.cmp(&chan_b.next_outbound_htlc_limit_msat)
}
});
}
user_channel_id: 0,
balance_msat: 0,
outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: outbound_capacity_msat,
inbound_capacity_msat: 42,
unspendable_punishment_reserve: None,
confirmations_required: None,
assert_eq!(path.last().unwrap().fee_msat, 250_000_000);
}
- // Check that setting outbound_capacity_msat in first_hops limits the channels.
+ // Check that setting next_outbound_htlc_limit_msat in first_hops limits the channels.
// Disable channel #1 and use another first hop.
update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
chain_hash: genesis_block(Network::Testnet).header.block_hash(),
excess_data: Vec::new()
});
- // Now, limit the first_hop by the outbound_capacity_msat of 200_000 sats.
+ // Now, limit the first_hop by the next_outbound_htlc_limit_msat of 200_000 sats.
let our_chans = vec![get_channel_details(Some(42), nodes[0].clone(), InitFeatures::from_le_bytes(vec![0b11]), 200_000_000)];
{
let payment_params = PaymentParameters::from_node_id(dst);
let amt = seed as u64 % 200_000_000;
let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &graph);
- if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &test_utils::TestLogger::new(), &scorer, &random_seed_bytes).is_ok() {
+ let logger = test_utils::TestLogger::new();
+ let scorer = ProbabilisticScorer::new(params, &graph, &logger);
+ if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &logger, &scorer, &random_seed_bytes).is_ok() {
continue 'load_endpoints;
}
}
let payment_params = PaymentParameters::from_node_id(dst).with_features(InvoiceFeatures::known());
let amt = seed as u64 % 200_000_000;
let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &graph);
- if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &test_utils::TestLogger::new(), &scorer, &random_seed_bytes).is_ok() {
+ let logger = test_utils::TestLogger::new();
+ let scorer = ProbabilisticScorer::new(params, &graph, &logger);
+ if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &logger, &scorer, &random_seed_bytes).is_ok() {
continue 'load_endpoints;
}
}
use ln::features::{InitFeatures, InvoiceFeatures};
use routing::scoring::{FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringParameters, Scorer};
use util::logger::{Logger, Record};
+ use util::test_utils::TestLogger;
use test::Bencher;
user_channel_id: 0,
balance_msat: 10_000_000,
outbound_capacity_msat: 10_000_000,
+ next_outbound_htlc_limit_msat: 10_000_000,
inbound_capacity_msat: 0,
unspendable_punishment_reserve: None,
confirmations_required: None,
#[bench]
fn generate_routes_with_probabilistic_scorer(bench: &mut Bencher) {
+ // ProbabilisticScorer::new now takes a logger; a TestLogger suffices for benchmarking.
+ let logger = TestLogger::new();
let network_graph = read_network_graph();
let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
generate_routes(bench, &network_graph, scorer, InvoiceFeatures::empty());
}
#[bench]
fn generate_mpp_routes_with_probabilistic_scorer(bench: &mut Bencher) {
+ // ProbabilisticScorer::new now takes a logger; a TestLogger suffices for benchmarking.
+ let logger = TestLogger::new();
let network_graph = read_network_graph();
let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
generate_routes(bench, &network_graph, scorer, InvoiceFeatures::known());
}
//! #
//! // Use the default channel penalties.
//! let params = ProbabilisticScoringParameters::default();
-//! let scorer = ProbabilisticScorer::new(params, &network_graph);
+//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
//!
//! // Or use custom channel penalties.
//! let params = ProbabilisticScoringParameters {
//! liquidity_penalty_multiplier_msat: 2 * 1000,
//! ..ProbabilisticScoringParameters::default()
//! };
-//! let scorer = ProbabilisticScorer::new(params, &network_graph);
+//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
//! # let random_seed_bytes = [42u8; 32];
//!
//! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &random_seed_bytes);
use routing::network_graph::{NetworkGraph, NodeId};
use routing::router::RouteHop;
use util::ser::{Readable, ReadableArgs, Writeable, Writer};
+use util::logger::Logger;
use prelude::*;
+use core::fmt;
use core::cell::{RefCell, RefMut};
use core::ops::{Deref, DerefMut};
use core::time::Duration;
/// behavior.
///
/// [1]: https://arxiv.org/abs/2107.05322
-pub type ProbabilisticScorer<G> = ProbabilisticScorerUsingTime::<G, ConfiguredTime>;
+pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
/// Probabilistic [`Score`] implementation.
///
/// (C-not exported) generally all users should use the [`ProbabilisticScorer`] type alias.
-pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph>, T: Time> {
+pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> where L::Target: Logger {
params: ProbabilisticScoringParameters,
network_graph: G,
+ logger: L,
// TODO: Remove entries of closed channels.
channel_liquidities: HashMap<u64, ChannelLiquidity<T>>,
}
half_life: Duration,
}
-impl<G: Deref<Target = NetworkGraph>, T: Time> ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
/// Creates a new scorer using the given scoring parameters for sending payments from a node
/// through a network graph.
- pub fn new(params: ProbabilisticScoringParameters, network_graph: G) -> Self {
+ pub fn new(params: ProbabilisticScoringParameters, network_graph: G, logger: L) -> Self {
Self {
params,
network_graph,
+ logger,
channel_liquidities: HashMap::new(),
}
}
impl<L: DerefMut<Target = u64>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, T, U> {
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
- fn failed_at_channel(&mut self, amount_msat: u64) {
+ fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
if amount_msat < self.max_liquidity_msat() {
+ log_debug!(logger, "Setting max liquidity of {} to {}", chan_descr, amount_msat);
self.set_max_liquidity_msat(amount_msat);
+ } else {
+ log_trace!(logger, "Max liquidity of {} already more than {}", chan_descr, amount_msat);
}
}
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
- fn failed_downstream(&mut self, amount_msat: u64) {
+ fn failed_downstream<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
if amount_msat > self.min_liquidity_msat() {
+ log_debug!(logger, "Setting min liquidity of {} to {}", chan_descr, amount_msat);
self.set_min_liquidity_msat(amount_msat);
+ } else {
+ log_trace!(logger, "Min liquidity of {} already less than {}", chan_descr, amount_msat);
}
}
/// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
- fn successful(&mut self, amount_msat: u64) {
+ fn successful<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
+ log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
self.set_max_liquidity_msat(max_liquidity_msat);
}
}
}
-impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
fn channel_penalty_msat(
&self, short_channel_id: u64, amount_msat: u64, capacity_msat: u64, source: &NodeId,
target: &NodeId
fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0);
let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
+ log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
let network_graph = self.network_graph.read_only();
- for hop in path {
+ for (hop_idx, hop) in path.iter().enumerate() {
let target = NodeId::from_pubkey(&hop.pubkey);
let channel_directed_from_source = network_graph.channels()
.get(&hop.short_channel_id)
.and_then(|channel| channel.as_directed_to(&target));
+ if hop.short_channel_id == short_channel_id && hop_idx == 0 {
+ log_warn!(self.logger, "Payment failed at the first hop - we do not attempt to learn channel info in such cases as we can directly observe local state.\n\tBecause we know the local state, we should generally not see failures here - this may be an indication that your channel peer on channel {} is broken and you may wish to close the channel.", hop.short_channel_id);
+ }
+
// Only score announced channels.
if let Some((channel, source)) = channel_directed_from_source {
let capacity_msat = channel.effective_capacity().as_msat();
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
.as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
- .failed_at_channel(amount_msat);
+ .failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
break;
}
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
.as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
- .failed_downstream(amount_msat);
+ .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+ } else {
+ log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
+ hop.short_channel_id);
}
}
}
fn payment_path_successful(&mut self, path: &[&RouteHop]) {
let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0);
let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
+ log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.",
+ path.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat);
let network_graph = self.network_graph.read_only();
for hop in path {
let target = NodeId::from_pubkey(&hop.pubkey);
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
.as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
- .successful(amount_msat);
+ .successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+ } else {
+ log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
+ hop.short_channel_id);
}
}
}
}
}
-impl<G: Deref<Target = NetworkGraph>, T: Time> Writeable for ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
#[inline]
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
write_tlv_fields!(w, {
}
}
-impl<G: Deref<Target = NetworkGraph>, T: Time>
-ReadableArgs<(ProbabilisticScoringParameters, G)> for ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time>
+ReadableArgs<(ProbabilisticScoringParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
#[inline]
fn read<R: Read>(
- r: &mut R, args: (ProbabilisticScoringParameters, G)
+ r: &mut R, args: (ProbabilisticScoringParameters, G, L)
) -> Result<Self, DecodeError> {
- let (params, network_graph) = args;
+ let (params, network_graph, logger) = args;
let mut channel_liquidities = HashMap::new();
read_tlv_fields!(r, {
(0, channel_liquidities, required)
Ok(Self {
params,
network_graph,
+ logger,
channel_liquidities,
})
}
use routing::network_graph::{NetworkGraph, NodeId};
use routing::router::RouteHop;
use util::ser::{Readable, ReadableArgs, Writeable};
+ use util::test_utils::TestLogger;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hashes::Hash;
// `ProbabilisticScorer` tests
/// A probabilistic scorer for testing with time that can be manually advanced.
- type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph, SinceEpoch>;
+ type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph, &'a TestLogger, SinceEpoch>;
fn sender_privkey() -> SecretKey {
SecretKey::from_slice(&[41; 32]).unwrap()
#[test]
fn liquidity_bounds_directed_from_lowest_node_id() {
+ let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters::default();
- let mut scorer = ProbabilisticScorer::new(params, &network_graph)
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated
#[test]
fn resets_liquidity_upper_bound_when_crossed_by_lower_bound() {
+ let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters::default();
- let mut scorer = ProbabilisticScorer::new(params, &network_graph)
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated
#[test]
fn resets_liquidity_lower_bound_when_crossed_by_upper_bound() {
+ let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters::default();
- let mut scorer = ProbabilisticScorer::new(params, &network_graph)
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated
#[test]
fn increased_penalty_nearing_liquidity_upper_bound() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
#[test]
fn constant_penalty_outside_liquidity_bounds() {
+ let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph)
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated
#[test]
fn does_not_further_penalize_own_channel() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let sender = sender_node_id();
let source = source_node_id();
let failed_path = payment_path_for_amount(500);
#[test]
fn sets_liquidity_lower_bound_on_downstream_failure() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let path = payment_path_for_amount(500);
#[test]
fn sets_liquidity_upper_bound_on_failure() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let path = payment_path_for_amount(500);
#[test]
fn reduces_liquidity_upper_bound_along_path_on_success() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let sender = sender_node_id();
let source = source_node_id();
let target = target_node_id();
#[test]
fn decays_liquidity_bounds_over_time() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
#[test]
fn decays_liquidity_bounds_without_shift_overflow() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 125);
#[test]
fn restricts_liquidity_bounds_after_decay() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
#[test]
fn restores_persisted_liquidity_bounds() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
let deserialized_scorer =
- <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph)).unwrap();
+ <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 300);
}
#[test]
fn decays_persisted_liquidity_bounds() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_offset_half_life: Duration::from_secs(10),
..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+ let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
let deserialized_scorer =
- <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph)).unwrap();
+ <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 473);
scorer.payment_path_failed(&payment_path_for_amount(250).iter().collect::<Vec<_>>(), 43);
fn scores_realistic_payments() {
// Shows the scores of "realistic" sends of 100k sats over channels of 1-10m sats (with a
// 50k sat reserve).
+ let logger = TestLogger::new();
let network_graph = network_graph();
let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
#[test]
fn adds_base_penalty_to_liquidity_penalty() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let source = source_node_id();
let target = target_node_id();
liquidity_penalty_multiplier_msat: 1_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 58);
let params = ProbabilisticScoringParameters {
base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 558);
}
#[test]
fn adds_amount_penalty_to_liquidity_penalty() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let source = source_node_id();
let target = target_node_id();
amount_penalty_multiplier_msat: 0,
..ProbabilisticScoringParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 300);
let params = ProbabilisticScoringParameters {
amount_penalty_multiplier_msat: 256,
..ProbabilisticScoringParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 337);
}
#[test]
fn calculates_log10_without_overflowing_u64_max_value() {
+ let logger = TestLogger::new();
let network_graph = network_graph();
let source = source_node_id();
let target = target_node_id();
liquidity_penalty_multiplier_msat: 40_000,
..ProbabilisticScoringParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph);
+ let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
assert_eq!(
scorer.channel_penalty_msat(42, u64::max_value(), u64::max_value(), &source, &target),
80_000,
//! Various user-configurable channel limits and settings which ChannelManager
//! applies for you.
+use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
use ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
/// Configuration we set when applicable.
/// are applied mostly only to incoming channels that's not much of a problem.
#[derive(Copy, Clone, Debug)]
pub struct ChannelHandshakeLimits {
- /// Minimum allowed satoshis when a channel is funded, this is supplied by the sender and so
+ /// Minimum allowed satoshis when a channel is funded. This is supplied by the sender and so
/// only applies to inbound channels.
///
/// Default value: 0.
pub min_funding_satoshis: u64,
+ /// Maximum allowed satoshis when a channel is funded. This is supplied by the sender and so
+ /// only applies to inbound channels.
+ ///
+ /// Default value: 2^24 - 1 (i.e., the largest funding amount permitted for non-wumbo channels).
+ pub max_funding_satoshis: u64,
/// The remote node sets a limit on the minimum size of HTLCs we can send to them. This allows
/// you to limit the maximum minimum-size they can require.
///
fn default() -> Self {
ChannelHandshakeLimits {
min_funding_satoshis: 0,
+ max_funding_satoshis: MAX_FUNDING_SATOSHIS_NO_WUMBO,
max_htlc_minimum_msat: <u64>::max_value(),
min_max_htlc_value_in_flight_msat: 0,
max_channel_reserve_satoshis: <u64>::max_value(),
/// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees
fee_paid_msat: Option<u64>,
},
+ /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
+ /// provide failure information for each MPP part in the payment.
+ ///
+ /// This event is provided once there are no further pending HTLCs for the payment and the
+ /// payment is no longer retryable, either due to a several-block timeout or because
+ /// [`ChannelManager::abandon_payment`] was previously called for the corresponding payment.
+ ///
+ /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ PaymentFailed {
+ /// The id returned by [`ChannelManager::send_payment`] and used with
+ /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
+ /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ payment_id: PaymentId,
+ /// The hash that was given to [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_hash: PaymentHash,
+ },
+ /// Indicates that a path for an outbound payment was successful.
+ ///
+ /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
+ /// [`Event::PaymentSent`] for obtaining the payment preimage.
+ PaymentPathSuccessful {
+ /// The id returned by [`ChannelManager::send_payment`] and used with
+ /// [`ChannelManager::retry_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
+ payment_id: PaymentId,
+ /// The hash that was given to [`ChannelManager::send_payment`].
+ ///
+ /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+ payment_hash: Option<PaymentHash>,
+ /// The payment path that was successful.
+ ///
+ /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
+ path: Vec<RouteHop>,
+ },
/// Indicates an outbound HTLC we sent failed. Probably some intermediary node dropped
/// something. You may wish to retry with a different route.
///
#[cfg(test)]
error_data: Option<Vec<u8>>,
},
- /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
- /// provide failure information for each MPP part in the payment.
- ///
- /// This event is provided once there are no further pending HTLCs for the payment and the
- /// payment is no longer retryable, either due to a several-block timeout or because
- /// [`ChannelManager::abandon_payment`] was previously called for the corresponding payment.
- ///
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- PaymentFailed {
- /// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
- /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
- payment_id: PaymentId,
- /// The hash that was given to [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_hash: PaymentHash,
- },
/// Used to indicate that [`ChannelManager::process_pending_htlc_forwards`] should be called at
/// a time in the future.
///
/// The full transaction received from the user
transaction: Transaction
},
- /// Indicates that a path for an outbound payment was successful.
- ///
- /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
- /// [`Event::PaymentSent`] for obtaining the payment preimage.
- PaymentPathSuccessful {
- /// The id returned by [`ChannelManager::send_payment`] and used with
- /// [`ChannelManager::retry_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
- payment_id: PaymentId,
- /// The hash that was given to [`ChannelManager::send_payment`].
- ///
- /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
- payment_hash: Option<PaymentHash>,
- /// The payment path that was successful.
- ///
- /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
- path: Vec<RouteHop>,
- },
/// Indicates a request to open a new channel by a peer.
///
/// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the