From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com> Date: Thu, 28 Apr 2022 21:56:49 +0000 (+0000) Subject: Merge pull request #1454 from TheBlueMatt/2022-04-fuzz-underflow X-Git-Tag: v0.0.107~49 X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=dc8479a6202ea84489e51cba020a600f0a2b9695;hp=92c87bae199b666f1f212b2b91accb5712490885;p=rust-lightning Merge pull request #1454 from TheBlueMatt/2022-04-fuzz-underflow Reject channels if the total reserves are larger than the funding --- diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index b0f052db..27c5ee2b 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -29,6 +29,7 @@ use bitcoin::blockdata::constants::genesis_block; use utils::test_logger; +use std::convert::TryInto; use std::collections::HashSet; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -205,7 +206,8 @@ pub fn do_test(data: &[u8], out: Out) { count => { for _ in 0..count { scid += 1; - let rnid = node_pks.iter().skip(slice_to_be16(get_slice!(2))as usize % node_pks.len()).next().unwrap(); + let rnid = node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap(); + let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap()); first_hops_vec.push(ChannelDetails { channel_id: [0; 32], counterparty: ChannelCounterparty { @@ -220,7 +222,7 @@ pub fn do_test(data: &[u8], out: Out) { channel_type: None, short_channel_id: Some(scid), inbound_scid_alias: None, - channel_value_satoshis: slice_to_be64(get_slice!(8)), + channel_value_satoshis: capacity, user_channel_id: 0, inbound_capacity_msat: 0, unspendable_punishment_reserve: None, confirmations_required: None, @@ -228,7 +230,8 @@ pub fn do_test(data: &[u8], out: Out) { is_outbound: true, is_funding_locked: true, is_usable: true, is_public: true, balance_msat: 0, - outbound_capacity_msat: 0, + outbound_capacity_msat: capacity.saturating_mul(1000), + next_outbound_htlc_limit_msat: capacity.saturating_mul(1000), inbound_htlc_minimum_msat: None, inbound_htlc_maximum_msat: None, }); diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 7a1512b7..e23737c0 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -203,10 +203,22 @@ impl BackgroundProcessor { let mut have_pruned = false; loop { - peer_manager.process_events(); // Note that this may block on ChannelManager's locking channel_manager.process_pending_events(&event_handler); chain_monitor.process_pending_events(&event_handler); + // Note that the PeerManager::process_events may block on ChannelManager's locks, + // hence it comes last here. When the ChannelManager finishes whatever it's doing, + // we want to ensure we get into `persist_manager` as quickly as we can, especially + // without running the normal event processing above and handing events to users. + // + // Specifically, on an *extremely* slow machine, we may see ChannelManager start + // processing a message effectively at any point during this loop. In order to + // minimize the time between such processing completing and persisting the updated + // ChannelManager, we want to minimize methods blocking on a ChannelManager + // generally, and as a fallback place such blocking only immediately before + // persistence. + peer_manager.process_events(); + // We wait up to 100ms, but track how long it takes to detect being put to sleep, // see `await_start`'s use below. 
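For context on the background-processor reordering above: the point is only to shrink the window between a ChannelManager update and its persistence. A rough, self-contained sketch of the intended loop shape, using stand-in types rather than the real PeerManager/ChannelManager/ChainMonitor and a hypothetical persist step:

struct ChannelManagerEvents;
struct ChainMonitorEvents;
struct PeerManagerStub;
struct ManagerPersister;

impl ChannelManagerEvents { fn process_pending_events(&self) { /* hand ChannelManager events to the user's handler */ } }
impl ChainMonitorEvents { fn process_pending_events(&self) { /* hand ChainMonitor events to the user's handler */ } }
impl PeerManagerStub { fn process_events(&self) { /* may block on ChannelManager's locks */ } }
impl ManagerPersister { fn persist_manager(&self) { /* write the ChannelManager to disk */ } }

fn one_background_iteration(
    chan_events: &ChannelManagerEvents,
    chain_events: &ChainMonitorEvents,
    peers: &PeerManagerStub,
    persister: &ManagerPersister,
) {
    // 1) Normal event processing first, so user handlers are not delayed by peer I/O.
    chan_events.process_pending_events();
    chain_events.process_pending_events();
    // 2) PeerManager::process_events comes last among the event calls because it can block
    //    on ChannelManager's locks; once it returns, any ChannelManager update it triggered
    //    is already in place.
    peers.process_events();
    // 3) Persist as soon as possible after that, minimizing the unpersisted window.
    persister.persist_manager();
}

fn main() {
    one_background_iteration(&ChannelManagerEvents, &ChainMonitorEvents, &PeerManagerStub, &ManagerPersister);
}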
let await_start = Instant::now(); diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index f5d839d2..b3f745bd 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -4,7 +4,7 @@ use crate::{BlockSource, BlockSourceResult, Cache, ChainNotifier}; use crate::poll::{ChainPoller, Validate, ValidatedBlockHeader}; -use bitcoin::blockdata::block::{Block, BlockHeader}; +use bitcoin::blockdata::block::BlockHeader; use bitcoin::hash_types::BlockHash; use bitcoin::network::constants::Network; @@ -203,7 +203,7 @@ impl<'a, C: Cache> Cache for ReadOnlyCache<'a, C> { struct DynamicChainListener<'a, L: chain::Listen + ?Sized>(&'a L); impl<'a, L: chain::Listen + ?Sized> chain::Listen for DynamicChainListener<'a, L> { - fn block_connected(&self, _block: &Block, _height: u32) { + fn filtered_block_connected(&self, _header: &BlockHeader, _txdata: &chain::transaction::TransactionData, _height: u32) { unreachable!() } @@ -216,10 +216,10 @@ impl<'a, L: chain::Listen + ?Sized> chain::Listen for DynamicChainListener<'a, L struct ChainListenerSet<'a, L: chain::Listen + ?Sized>(Vec<(u32, &'a L)>); impl<'a, L: chain::Listen + ?Sized> chain::Listen for ChainListenerSet<'a, L> { - fn block_connected(&self, block: &Block, height: u32) { + fn filtered_block_connected(&self, header: &BlockHeader, txdata: &chain::transaction::TransactionData, height: u32) { for (starting_height, chain_listener) in self.0.iter() { if height > *starting_height { - chain_listener.block_connected(block, height); + chain_listener.filtered_block_connected(header, txdata, height); } } } diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index fe57c0c6..c101d4bd 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -166,7 +166,7 @@ impl BlockSource for Blockchain { pub struct NullChainListener; impl chain::Listen for NullChainListener { - fn block_connected(&self, _block: &Block, _height: u32) {} + fn filtered_block_connected(&self, _header: &BlockHeader, _txdata: &chain::transaction::TransactionData, _height: u32) {} fn block_disconnected(&self, _header: &BlockHeader, _height: u32) {} } @@ -195,13 +195,13 @@ impl MockChainListener { } impl chain::Listen for MockChainListener { - fn block_connected(&self, block: &Block, height: u32) { + fn filtered_block_connected(&self, header: &BlockHeader, _txdata: &chain::transaction::TransactionData, height: u32) { match self.expected_blocks_connected.borrow_mut().pop_front() { None => { - panic!("Unexpected block connected: {:?}", block.block_hash()); + panic!("Unexpected block connected: {:?}", header.block_hash()); }, Some(expected_block) => { - assert_eq!(block.block_hash(), expected_block.header.block_hash()); + assert_eq!(header.block_hash(), expected_block.header.block_hash()); assert_eq!(height, expected_block.height); }, } diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 19095fa2..aae260e7 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -23,7 +23,7 @@ //! events. The remote server would make use of [`ChainMonitor`] for block processing and for //! servicing [`ChannelMonitor`] updates from the client. 
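All of the chain::Listen implementors touched in this file (and in test_utils.rs and the lightning crate below) switch from the full-block method to the filtered one. Roughly, an implementor now looks like the sketch here; the trait and types are local stand-ins mirroring the new shape, not the real lightning/bitcoin items:

struct BlockHeader;
struct Transaction;
// Mirrors lightning's chain::transaction::TransactionData: (index within block, transaction).
type TransactionData<'a> = [(usize, &'a Transaction)];

trait Listen {
    fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
    fn block_disconnected(&self, header: &BlockHeader, height: u32);
}

struct TxCountingListener;

impl Listen for TxCountingListener {
    fn filtered_block_connected(&self, _header: &BlockHeader, txdata: &TransactionData, height: u32) {
        // txdata may be every transaction in the block or only a filtered subset.
        println!("block at height {} delivered {} transaction(s)", height, txdata.len());
    }
    fn block_disconnected(&self, _header: &BlockHeader, _height: u32) {}
}

fn main() {
    let listener = TxCountingListener;
    let tx = Transaction;
    listener.filtered_block_connected(&BlockHeader, &[(0, &tx)], 100);
    listener.block_disconnected(&BlockHeader, 100);
}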
-use bitcoin::blockdata::block::{Block, BlockHeader}; +use bitcoin::blockdata::block::BlockHeader; use bitcoin::hash_types::Txid; use chain; @@ -501,9 +501,7 @@ where L::Target: Logger, P::Target: Persist, { - fn block_connected(&self, block: &Block, height: u32) { - let header = &block.header; - let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); + fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) { log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height); self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| { monitor.block_connected( diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index fb2cbadd..681d895f 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -20,7 +20,7 @@ //! security-domain-separated system design, you should consider having multiple paths for //! ChannelMonitors to get out of the HSM and onto monitoring devices. -use bitcoin::blockdata::block::{Block, BlockHeader}; +use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::{TxOut,Transaction}; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::opcodes; @@ -3007,9 +3007,8 @@ where F::Target: FeeEstimator, L::Target: Logger, { - fn block_connected(&self, block: &Block, height: u32) { - let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); - self.0.block_connected(&block.header, &txdata, height, &*self.1, &*self.2, &*self.3); + fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) { + self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &*self.3); } fn block_disconnected(&self, header: &BlockHeader, height: u32) { diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 25e5a97d..24eb09e7 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -87,9 +87,20 @@ pub trait Access { /// sourcing chain data using a block-oriented API should prefer this interface over [`Confirm`]. /// Such clients fetch the entire header chain whereas clients using [`Confirm`] only fetch headers /// when needed. +/// +/// By using [`Listen::filtered_block_connected`] this interface supports clients fetching the +/// entire header chain and only blocks with matching transaction data using BIP 157 filters or +/// other similar filtering. pub trait Listen { + /// Notifies the listener that a block was added at the given height, with the transaction data + /// possibly filtered. + fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32); + /// Notifies the listener that a block was added at the given height. - fn block_connected(&self, block: &Block, height: u32); + fn block_connected(&self, block: &Block, height: u32) { + let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); + self.filtered_block_connected(&block.header, &txdata, height); + } /// Notifies the listener that a block was removed at the given height. 
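The default block_connected above simply enumerates every transaction in the block; the point of the new method is that a compact-filter (BIP 157/158) client can instead pass only the transactions that matched its filter, keeping each one's original index within the block. A self-contained sketch of building that (index, tx) list, with stand-in types rather than the real bitcoin ones:

struct Transaction { pays_us: bool }
struct BlockHeader;
struct Block { header: BlockHeader, txdata: Vec<Transaction> }

// What a filtering client would hand to `Listen::filtered_block_connected`: only the
// matching transactions, each paired with its original index within the block.
fn filtered_txdata(block: &Block) -> Vec<(usize, &Transaction)> {
    block.txdata.iter().enumerate().filter(|(_, tx)| tx.pays_us).collect()
}

fn main() {
    let block = Block {
        header: BlockHeader,
        txdata: vec![Transaction { pays_us: false }, Transaction { pays_us: true }],
    };
    // The default `block_connected` would pass both entries; a filtering client
    // passes only the one that matched.
    assert_eq!(filtered_txdata(&block).len(), 1);
    assert_eq!(filtered_txdata(&block)[0].0, 1); // original index preserved
}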
fn block_disconnected(&self, header: &BlockHeader, height: u32); @@ -355,8 +366,8 @@ pub struct WatchedOutput { } impl Listen for core::ops::Deref { - fn block_connected(&self, block: &Block, height: u32) { - (**self).block_connected(block, height); + fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) { + (**self).filtered_block_connected(header, txdata, height); } fn block_disconnected(&self, header: &BlockHeader, height: u32) { @@ -369,9 +380,9 @@ where T::Target: Listen, U::Target: Listen, { - fn block_connected(&self, block: &Block, height: u32) { - self.0.block_connected(block, height); - self.1.block_connected(block, height); + fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) { + self.0.filtered_block_connected(header, txdata, height); + self.1.filtered_block_connected(header, txdata, height); } fn block_disconnected(&self, header: &BlockHeader, height: u32) { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 60ff1ded..1cb7a689 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -62,6 +62,17 @@ pub struct ChannelValueStat { pub counterparty_dust_limit_msat: u64, } +pub struct AvailableBalances { + /// The amount that would go to us if we close the channel, ignoring any on-chain fees. + pub balance_msat: u64, + /// Total amount available for our counterparty to send to us. + pub inbound_capacity_msat: u64, + /// Total amount available for us to send to our counterparty. + pub outbound_capacity_msat: u64, + /// The maximum value we can assign to the next outbound HTLC + pub next_outbound_htlc_limit_msat: u64, +} + #[derive(Debug, Clone, Copy, PartialEq)] enum FeeUpdateState { // Inbound states mirroring InboundHTLCState @@ -734,9 +745,13 @@ pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330; -/// Maximum `funding_satoshis` value, according to the BOLT #2 specification -/// it's 2^24. -pub const MAX_FUNDING_SATOSHIS: u64 = 1 << 24; +/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if +/// `option_support_large_channel` (aka wumbo channels) is not supported. +/// It's 2^24 - 1. +pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1; + +/// Total bitcoin supply in satoshis. +pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000; /// The maximum network dust limit for standard script formats. 
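A quick sanity check of the two new constants above (the values are simply restated from the hunk):

fn main() {
    // From the hunk above: 2^24 - 1 sats is the non-wumbo funding cap...
    const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
    // ...and the total bitcoin supply is 21M BTC * 100M sat/BTC.
    const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;

    assert_eq!(MAX_FUNDING_SATOSHIS_NO_WUMBO, 16_777_215);
    assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 2_100_000_000_000_000);
    assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO < TOTAL_BITCOIN_SUPPLY_SATOSHIS);
}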
This currently represents the /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire @@ -850,8 +865,11 @@ impl Channel { let holder_signer = keys_provider.get_channel_signer(false, channel_value_satoshis); let pubkeys = holder_signer.pubkeys().clone(); - if channel_value_satoshis >= MAX_FUNDING_SATOSHIS { - return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than {}, it was {}", MAX_FUNDING_SATOSHIS, channel_value_satoshis)}); + if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { + return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)}); + } + if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { + return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); } let channel_value_msat = channel_value_satoshis * 1000; if push_msat > channel_value_msat { @@ -1076,8 +1094,11 @@ impl Channel { } // Check sanity of message fields: - if msg.funding_satoshis >= MAX_FUNDING_SATOSHIS { - return Err(ChannelError::Close(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, msg.funding_satoshis))); + if msg.funding_satoshis > config.peer_channel_config_limits.max_funding_satoshis { + return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.peer_channel_config_limits.max_funding_satoshis, msg.funding_satoshis))); + } + if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { + return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis))); } if msg.channel_reserve_satoshis > msg.funding_satoshis { return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis))); @@ -2332,40 +2353,39 @@ impl Channel { stats } - /// Get the available (ie not including pending HTLCs) inbound and outbound balance in msat. + /// Get the available balances, see [`AvailableBalances`]'s fields for more info. /// Doesn't bother handling the /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC /// corner case properly. - /// The channel reserve is subtracted from each balance. - /// See also [`Channel::get_balance_msat`] - pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) { + pub fn get_available_balances(&self) -> AvailableBalances { // Note that we have to handle overflow due to the above case. - ( - cmp::max(self.channel_value_satoshis as i64 * 1000 - - self.value_to_self_msat as i64 - - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 - - self.holder_selected_channel_reserve_satoshis as i64 * 1000, - 0) as u64, - cmp::max(self.value_to_self_msat as i64 - - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 - - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000, - 0) as u64 - ) - } + let outbound_stats = self.get_outbound_pending_htlc_stats(None); - /// Get our total balance in msat. - /// This is the amount that would go to us if we close the channel, ignoring any on-chain fees. 
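The outbound funding checks added above, restated as one stand-alone function (simplified String errors instead of APIError/ChannelError; the config-driven inbound cap from the accept-side hunk is omitted):

const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;

fn check_outbound_funding(peer_supports_wumbo: bool, channel_value_satoshis: u64) -> Result<(), String> {
    // Without wumbo support on the peer, funding is capped at 2^24 - 1 sats.
    if !peer_supports_wumbo && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
        return Err(format!("funding_value must not exceed {}, it was {}",
            MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis));
    }
    // Even with wumbo, funding must stay below the total bitcoin supply.
    if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
        return Err(format!("funding_value must be smaller than the total bitcoin supply, it was {}",
            channel_value_satoshis));
    }
    Ok(())
}

fn main() {
    assert!(check_outbound_funding(false, MAX_FUNDING_SATOSHIS_NO_WUMBO).is_ok());
    assert!(check_outbound_funding(false, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1).is_err());
    assert!(check_outbound_funding(true, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1).is_ok());
    assert!(check_outbound_funding(true, TOTAL_BITCOIN_SUPPLY_SATOSHIS).is_err());
}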
- /// See also [`Channel::get_inbound_outbound_available_balance_msat`] - pub fn get_balance_msat(&self) -> u64 { - // Include our local balance, plus any inbound HTLCs we know the preimage for, minus any - // HTLCs sent or which will be sent after commitment signed's are exchanged. let mut balance_msat = self.value_to_self_msat; for ref htlc in self.pending_inbound_htlcs.iter() { if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { balance_msat += htlc.amount_msat; } } - balance_msat - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat + balance_msat -= outbound_stats.pending_htlcs_value_msat; + + let outbound_capacity_msat = cmp::max(self.value_to_self_msat as i64 + - outbound_stats.pending_htlcs_value_msat as i64 + - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000, + 0) as u64; + AvailableBalances { + inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000 + - self.value_to_self_msat as i64 + - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 + - self.holder_selected_channel_reserve_satoshis as i64 * 1000, + 0) as u64, + outbound_capacity_msat, + next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64, + self.counterparty_max_htlc_value_in_flight_msat as i64 + - outbound_stats.pending_htlcs_value_msat as i64), + 0) as u64, + balance_msat, + } } pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option) { @@ -4112,7 +4132,7 @@ impl Channel { if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() { return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned())); } - if msg.fee_satoshis > 21_000_000 * 1_0000_0000 { //this is required to stop potential overflow in build_closing_transaction + if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } @@ -6319,7 +6339,7 @@ mod tests { use ln::PaymentHash; use ln::channelmanager::{HTLCSource, PaymentId}; use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator}; - use ln::channel::MAX_FUNDING_SATOSHIS; + use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS}; use ln::features::InitFeatures; use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate}; use ln::script::ShutdownScript; @@ -6355,9 +6375,10 @@ mod tests { } #[test] - fn test_max_funding_satoshis() { - assert!(MAX_FUNDING_SATOSHIS <= 21_000_000 * 100_000_000, - "MAX_FUNDING_SATOSHIS is greater than all satoshis in existence"); + fn test_max_funding_satoshis_no_wumbo() { + assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000); + assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS, + "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence"); } #[test] diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 93746f0f..5990b2ad 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18,7 +18,7 @@ //! imply it needs to fail HTLCs/payments/channels it manages). //! 
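Restating the new next_outbound_htlc_limit_msat arithmetic above as a stand-alone function (same zero floor as the cmp::max calls; the parameter names are simplified stand-ins for the channel's fields):

fn next_outbound_htlc_limit_msat(
    value_to_self_msat: u64,
    pending_outbound_htlcs_msat: u64,
    counterparty_reserve_sat: u64,
    counterparty_max_htlc_value_in_flight_msat: u64,
) -> u64 {
    // Outbound capacity: our balance, minus in-flight outbound HTLCs, minus the reserve
    // the counterparty requires us to keep, floored at zero.
    let outbound_capacity_msat = core::cmp::max(
        value_to_self_msat as i64
            - pending_outbound_htlcs_msat as i64
            - counterparty_reserve_sat as i64 * 1000,
        0) as u64;
    // The next single HTLC is further limited by the counterparty's max-in-flight cap,
    // again floored at zero.
    core::cmp::max(core::cmp::min(
        outbound_capacity_msat as i64,
        counterparty_max_htlc_value_in_flight_msat as i64 - pending_outbound_htlcs_msat as i64),
        0) as u64
}

fn main() {
    // 1_000_000 msat to us, nothing in flight, a 10 sat (10_000 msat) reserve, and a
    // 500_000 msat in-flight cap: capacity is 990_000 msat, but the next single HTLC
    // is capped at 500_000 msat.
    assert_eq!(next_outbound_htlc_limit_msat(1_000_000, 0, 10, 500_000), 500_000);
}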
-use bitcoin::blockdata::block::{Block, BlockHeader}; +use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::Transaction; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; @@ -1005,6 +1005,13 @@ pub struct ChannelDetails { /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we /// should be able to spend nearly this amount. pub outbound_capacity_msat: u64, + /// The available outbound capacity for sending a single HTLC to the remote peer. This is + /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by + /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us + /// to use a limit as close as possible to the HTLC limit we can currently send. + /// + /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`]. + pub next_outbound_htlc_limit_msat: u64, /// The available inbound capacity for the remote peer to send HTLCs to us. This does not /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not /// available for inclusion in new inbound HTLCs). @@ -1509,8 +1516,6 @@ impl ChannelMana /// /// Non-proportional fees are fixed according to our risk using the provided fee estimator. /// - /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`! - /// /// Users need to notify the new ChannelManager when a new block is connected or /// disconnected using its `block_connected` and `block_disconnected` methods, starting /// from after `params.latest_hash`. @@ -1670,8 +1675,7 @@ impl ChannelMana let channel_state = self.channel_state.lock().unwrap(); res.reserve(channel_state.by_id.len()); for (channel_id, channel) in channel_state.by_id.iter().filter(f) { - let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat(); - let balance_msat = channel.get_balance_msat(); + let balance = channel.get_available_balances(); let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = channel.get_holder_counterparty_selected_channel_reserve_satoshis(); res.push(ChannelDetails { @@ -1698,9 +1702,10 @@ impl ChannelMana inbound_scid_alias: channel.latest_inbound_scid_alias(), channel_value_satoshis: channel.get_value_satoshis(), unspendable_punishment_reserve: to_self_reserve_satoshis, - balance_msat, - inbound_capacity_msat, - outbound_capacity_msat, + balance_msat: balance.balance_msat, + inbound_capacity_msat: balance.inbound_capacity_msat, + outbound_capacity_msat: balance.outbound_capacity_msat, + next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, user_channel_id: channel.get_user_id(), confirmations_required: channel.minimum_depth(), force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), @@ -5303,18 +5308,17 @@ where F::Target: FeeEstimator, L::Target: Logger, { - fn block_connected(&self, block: &Block, height: u32) { + fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) { { let best_block = self.best_block.read().unwrap(); - assert_eq!(best_block.block_hash(), block.header.prev_blockhash, + assert_eq!(best_block.block_hash(), header.prev_blockhash, "Blocks must be connected in chain-order - the connected header must build on the last connected header"); assert_eq!(best_block.height(), height - 1, "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height"); 
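To illustrate why ChannelDetails grows this field: when sending a single HTLC over a first hop it is the per-HTLC limit, not the total outbound capacity, that matters. A simplified local stand-in (not the real ChannelDetails) showing how a caller might use it:

struct FirstHop {
    short_channel_id: u64,
    outbound_capacity_msat: u64,
    next_outbound_htlc_limit_msat: u64,
}

// Pick a first hop that can carry `amount_msat` in a single HTLC.
fn usable_first_hop(hops: &[FirstHop], amount_msat: u64) -> Option<u64> {
    hops.iter()
        .find(|h| h.next_outbound_htlc_limit_msat >= amount_msat)
        .map(|h| h.short_channel_id)
}

fn main() {
    let hops = [FirstHop {
        short_channel_id: 42,
        outbound_capacity_msat: 1_000_000,
        // e.g. limited by the counterparty's max_htlc_value_in_flight
        next_outbound_htlc_limit_msat: 300_000,
    }];
    // Plenty of total capacity exists, but no single HTLC of 500k msat can be sent.
    assert_eq!(usable_first_hop(&hops, 500_000), None);
    assert_eq!(usable_first_hop(&hops, 250_000), Some(42));
}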
} - let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); - self.transactions_confirmed(&block.header, &txdata, height); - self.best_block_updated(&block.header, height); + self.transactions_confirmed(header, txdata, height); + self.best_block_updated(header, height); } fn block_disconnected(&self, header: &BlockHeader, height: u32) { @@ -5937,6 +5941,9 @@ impl_writeable_tlv_based!(ChannelDetails, { (14, user_channel_id, required), (16, balance_msat, required), (18, outbound_capacity_msat, required), + // Note that by the time we get past the required read above, outbound_capacity_msat will be + // filled in, so we can safely unwrap it here. + (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())), (20, inbound_capacity_msat, required), (22, confirmations_required, option), (24, force_close_spend_delay, option), diff --git a/lightning/src/ln/features.rs b/lightning/src/ln/features.rs index 4512ee80..25808746 100644 --- a/lightning/src/ln/features.rs +++ b/lightning/src/ln/features.rs @@ -137,7 +137,7 @@ mod sealed { // Byte 1 , // Byte 2 - BasicMPP, + BasicMPP | Wumbo, // Byte 3 ShutdownAnySegwit, // Byte 4 @@ -169,7 +169,7 @@ mod sealed { // Byte 1 , // Byte 2 - BasicMPP, + BasicMPP | Wumbo, // Byte 3 ShutdownAnySegwit, // Byte 4 @@ -390,6 +390,9 @@ mod sealed { define_feature!(17, BasicMPP, [InitContext, NodeContext, InvoiceContext], "Feature flags for `basic_mpp`.", set_basic_mpp_optional, set_basic_mpp_required, supports_basic_mpp, requires_basic_mpp); + define_feature!(19, Wumbo, [InitContext, NodeContext], + "Feature flags for `option_support_large_channel` (aka wumbo channels).", set_wumbo_optional, set_wumbo_required, + supports_wumbo, requires_wumbo); define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext], "Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional, set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit); @@ -740,6 +743,15 @@ impl Features { self } } + +impl Features { + #[cfg(test)] + pub(crate) fn clear_wumbo(mut self) -> Self { + ::clear_bits(&mut self.flags); + self + } +} + macro_rules! 
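Wumbo is assigned bit 19, whose optional flag lands in feature byte 2 next to basic_mpp (bit 17); that is where the 0b00001010 expected by the updated features test below comes from. A small check of that arithmetic:

fn main() {
    // Optional feature bits from the hunks above: basic_mpp = 17, wumbo
    // (option_support_large_channel) = 19.
    let basic_mpp_bit = 17u32;
    let wumbo_bit = 19u32;

    // Both bits live in flag byte 2 (bits 16..24).
    assert_eq!(basic_mpp_bit / 8, 2);
    assert_eq!(wumbo_bit / 8, 2);

    // Their in-byte masks combine to the 0b00001010 the updated test expects.
    let byte2 = (1u8 << (basic_mpp_bit % 8)) | (1u8 << (wumbo_bit % 8));
    assert_eq!(byte2, 0b00001010);
}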
impl_feature_len_prefixed_write { ($features: ident) => { impl Writeable for $features { @@ -843,6 +855,11 @@ mod tests { assert!(!InitFeatures::known().requires_scid_privacy()); assert!(!NodeFeatures::known().requires_scid_privacy()); + assert!(InitFeatures::known().supports_wumbo()); + assert!(NodeFeatures::known().supports_wumbo()); + assert!(!InitFeatures::known().requires_wumbo()); + assert!(!NodeFeatures::known().requires_wumbo()); + let mut init_features = InitFeatures::known(); assert!(init_features.initial_routing_sync()); init_features.clear_initial_routing_sync(); @@ -878,14 +895,14 @@ mod tests { // Check that the flags are as expected: // - option_data_loss_protect // - var_onion_optin (req) | static_remote_key (req) | payment_secret(req) - // - basic_mpp + // - basic_mpp | wumbo // - opt_shutdown_anysegwit // - // - option_channel_type | option_scid_alias assert_eq!(node_features.flags.len(), 6); assert_eq!(node_features.flags[0], 0b00000010); assert_eq!(node_features.flags[1], 0b01010001); - assert_eq!(node_features.flags[2], 0b00000010); + assert_eq!(node_features.flags[2], 0b00001010); assert_eq!(node_features.flags[3], 0b00001000); assert_eq!(node_features.flags[4], 0b00000000); assert_eq!(node_features.flags[5], 0b10100000); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index aa2551f3..60fa9c80 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -58,9 +58,12 @@ use ln::chan_utils::CommitmentTransaction; #[test] fn test_insane_channel_opens() { // Stand up a network of 2 nodes + use ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; + let mut cfg = UserConfig::default(); + cfg.peer_channel_config_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Instantiate channel parameters where we push the maximum msats given our @@ -92,11 +95,11 @@ fn test_insane_channel_opens() { } else { assert!(false); } }; - use ln::channel::MAX_FUNDING_SATOSHIS; use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT; // Test all mutations that would make the channel open message insane - insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg }); + insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg }); + insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg }); insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg }); @@ -113,6 +116,25 @@ fn test_insane_channel_opens() { insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg }); } +#[test] +fn test_funding_exceeds_no_wumbo_limit() { + // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to + // them. 
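The test above raises the new per-config inbound funding cap so the insane-open checks can still be exercised; an operator would more typically lower it. A minimal usage sketch mirroring the test's config access (that this compiles against any given release is an assumption; the field only exists from this change onward):

use lightning::util::config::UserConfig;

fn main() {
    let mut config = UserConfig::default();
    // Refuse inbound channels funded with more than 0.05 BTC (5_000_000 sats).
    // The default is MAX_FUNDING_SATOSHIS_NO_WUMBO (2^24 - 1 sats), per the
    // config.rs hunk later in this diff.
    config.peer_channel_config_limits.max_funding_satoshis = 5_000_000;
}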
+ use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO; + let chanmon_cfgs = create_chanmon_cfgs(2); + let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + node_cfgs[1].features = InitFeatures::known().clear_wumbo(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) { + Err(APIError::APIMisuseError { err }) => { + assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err); + }, + _ => panic!() + } +} + fn do_test_counterparty_no_reserve(send_from_initiator: bool) { // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, // but only for them. Because some LSPs do it with some level of trust of the clients (for a diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index d12f8c06..c09df175 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -1707,7 +1707,11 @@ fn is_gossip_msg(type_id: u16) -> bool { match type_id { msgs::ChannelAnnouncement::TYPE | msgs::ChannelUpdate::TYPE | - msgs::NodeAnnouncement::TYPE => true, + msgs::NodeAnnouncement::TYPE | + msgs::QueryChannelRange::TYPE | + msgs::ReplyChannelRange::TYPE | + msgs::QueryShortChannelIds::TYPE | + msgs::ReplyShortChannelIdsEnd::TYPE => true, _ => false } } diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 212ac2f1..816dfaad 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -412,7 +412,7 @@ impl<'a> CandidateRouteHop<'a> { fn effective_capacity(&self) -> EffectiveCapacity { match self { CandidateRouteHop::FirstHop { details } => EffectiveCapacity::ExactLiquidity { - liquidity_msat: details.outbound_capacity_msat, + liquidity_msat: details.next_outbound_htlc_limit_msat, }, CandidateRouteHop::PublicHop { info, .. } => info.effective_capacity(), CandidateRouteHop::PrivateHop { .. } => EffectiveCapacity::Infinite, @@ -818,7 +818,8 @@ where L::Target: Logger { // We don't want multiple paths (as per MPP) share liquidity of the same channels. // This map allows paths to be aware of the channel use by other paths in the same call. // This would help to make a better path finding decisions and not "overbook" channels. - // It is unaware of the directions (except for `outbound_capacity_msat` in `first_hops`). + // It is unaware of the directions (except for `next_outbound_htlc_limit_msat` in + // `first_hops`). let mut bookkept_channels_liquidity_available_msat = HashMap::with_capacity(network_nodes.len()); // Keeping track of how much value we already collected across other paths. Helps to decide: @@ -841,12 +842,12 @@ where L::Target: Logger { // sort channels above `recommended_value_msat` in ascending order, preferring channels // which have enough, but not too much, capacity for the payment. 
channels.sort_unstable_by(|chan_a, chan_b| { - if chan_b.outbound_capacity_msat < recommended_value_msat || chan_a.outbound_capacity_msat < recommended_value_msat { + if chan_b.next_outbound_htlc_limit_msat < recommended_value_msat || chan_a.next_outbound_htlc_limit_msat < recommended_value_msat { // Sort in descending order - chan_b.outbound_capacity_msat.cmp(&chan_a.outbound_capacity_msat) + chan_b.next_outbound_htlc_limit_msat.cmp(&chan_a.next_outbound_htlc_limit_msat) } else { // Sort in ascending order - chan_a.outbound_capacity_msat.cmp(&chan_b.outbound_capacity_msat) + chan_a.next_outbound_htlc_limit_msat.cmp(&chan_b.next_outbound_htlc_limit_msat) } }); } @@ -1746,6 +1747,7 @@ mod tests { user_channel_id: 0, balance_msat: 0, outbound_capacity_msat, + next_outbound_htlc_limit_msat: outbound_capacity_msat, inbound_capacity_msat: 42, unspendable_punishment_reserve: None, confirmations_required: None, @@ -3407,7 +3409,7 @@ mod tests { assert_eq!(path.last().unwrap().fee_msat, 250_000_000); } - // Check that setting outbound_capacity_msat in first_hops limits the channels. + // Check that setting next_outbound_htlc_limit_msat in first_hops limits the channels. // Disable channel #1 and use another first hop. update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate { chain_hash: genesis_block(Network::Testnet).header.block_hash(), @@ -3422,7 +3424,7 @@ mod tests { excess_data: Vec::new() }); - // Now, limit the first_hop by the outbound_capacity_msat of 200_000 sats. + // Now, limit the first_hop by the next_outbound_htlc_limit_msat of 200_000 sats. let our_chans = vec![get_channel_details(Some(42), nodes[0].clone(), InitFeatures::from_le_bytes(vec![0b11]), 200_000_000)]; { @@ -5350,8 +5352,9 @@ mod tests { let payment_params = PaymentParameters::from_node_id(dst); let amt = seed as u64 % 200_000_000; let params = ProbabilisticScoringParameters::default(); - let scorer = ProbabilisticScorer::new(params, &graph); - if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &test_utils::TestLogger::new(), &scorer, &random_seed_bytes).is_ok() { + let logger = test_utils::TestLogger::new(); + let scorer = ProbabilisticScorer::new(params, &graph, &logger); + if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &logger, &scorer, &random_seed_bytes).is_ok() { continue 'load_endpoints; } } @@ -5386,8 +5389,9 @@ mod tests { let payment_params = PaymentParameters::from_node_id(dst).with_features(InvoiceFeatures::known()); let amt = seed as u64 % 200_000_000; let params = ProbabilisticScoringParameters::default(); - let scorer = ProbabilisticScorer::new(params, &graph); - if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &test_utils::TestLogger::new(), &scorer, &random_seed_bytes).is_ok() { + let logger = test_utils::TestLogger::new(); + let scorer = ProbabilisticScorer::new(params, &graph, &logger); + if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &logger, &scorer, &random_seed_bytes).is_ok() { continue 'load_endpoints; } } @@ -5433,6 +5437,7 @@ mod benches { use ln::features::{InitFeatures, InvoiceFeatures}; use routing::scoring::{FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringParameters, Scorer}; use util::logger::{Logger, Record}; + use util::test_utils::TestLogger; use test::Bencher; @@ -5473,6 +5478,7 @@ mod benches { user_channel_id: 0, balance_msat: 10_000_000, outbound_capacity_msat: 10_000_000, + next_outbound_htlc_limit_msat: 10_000_000, inbound_capacity_msat: 0, 
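The comparator above keeps its previous two-mode behaviour, now keyed on the per-HTLC limit: largest-first when some candidate cannot carry the recommended value, smallest-sufficient-first otherwise. Restated over plain u64 limits:

fn sort_first_hops(limits_msat: &mut Vec<u64>, recommended_value_msat: u64) {
    limits_msat.sort_unstable_by(|a, b| {
        if *a < recommended_value_msat || *b < recommended_value_msat {
            // Not everything fits: put the biggest channels first.
            b.cmp(a)
        } else {
            // Everything fits: prefer the smallest channel that is still enough.
            a.cmp(b)
        }
    });
}

fn main() {
    // None of the candidates reaches the recommended 250, so sort descending.
    let mut limits = vec![50, 200, 100];
    sort_first_hops(&mut limits, 250);
    assert_eq!(limits, vec![200, 100, 50]);

    // All candidates fit, so sort ascending: smallest sufficient channel first.
    let mut limits = vec![400, 260, 300];
    sort_first_hops(&mut limits, 250);
    assert_eq!(limits, vec![260, 300, 400]);
}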
unspendable_punishment_reserve: None, confirmations_required: None, @@ -5516,17 +5522,19 @@ mod benches { #[bench] fn generate_routes_with_probabilistic_scorer(bench: &mut Bencher) { + let logger = TestLogger::new(); let network_graph = read_network_graph(); let params = ProbabilisticScoringParameters::default(); - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); generate_routes(bench, &network_graph, scorer, InvoiceFeatures::empty()); } #[bench] fn generate_mpp_routes_with_probabilistic_scorer(bench: &mut Bencher) { + let logger = TestLogger::new(); let network_graph = read_network_graph(); let params = ProbabilisticScoringParameters::default(); - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); generate_routes(bench, &network_graph, scorer, InvoiceFeatures::known()); } diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 206cc0a8..0e04c639 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -33,14 +33,14 @@ //! # //! // Use the default channel penalties. //! let params = ProbabilisticScoringParameters::default(); -//! let scorer = ProbabilisticScorer::new(params, &network_graph); +//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); //! //! // Or use custom channel penalties. //! let params = ProbabilisticScoringParameters { //! liquidity_penalty_multiplier_msat: 2 * 1000, //! ..ProbabilisticScoringParameters::default() //! }; -//! let scorer = ProbabilisticScorer::new(params, &network_graph); +//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); //! # let random_seed_bytes = [42u8; 32]; //! //! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &random_seed_bytes); @@ -58,8 +58,10 @@ use ln::msgs::DecodeError; use routing::network_graph::{NetworkGraph, NodeId}; use routing::router::RouteHop; use util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use util::logger::Logger; use prelude::*; +use core::fmt; use core::cell::{RefCell, RefMut}; use core::ops::{Deref, DerefMut}; use core::time::Duration; @@ -503,14 +505,15 @@ impl Readable for ChannelFailure { /// behavior. /// /// [1]: https://arxiv.org/abs/2107.05322 -pub type ProbabilisticScorer = ProbabilisticScorerUsingTime::; +pub type ProbabilisticScorer = ProbabilisticScorerUsingTime::; /// Probabilistic [`Score`] implementation. /// /// (C-not exported) generally all users should use the [`ProbabilisticScorer`] type alias. -pub struct ProbabilisticScorerUsingTime, T: Time> { +pub struct ProbabilisticScorerUsingTime, L: Deref, T: Time> where L::Target: Logger { params: ProbabilisticScoringParameters, network_graph: G, + logger: L, // TODO: Remove entries of closed channels. channel_liquidities: HashMap>, } @@ -603,13 +606,14 @@ struct DirectedChannelLiquidity, T: Time, U: Deref, T: Time> ProbabilisticScorerUsingTime { +impl, L: Deref, T: Time> ProbabilisticScorerUsingTime where L::Target: Logger { /// Creates a new scorer using the given scoring parameters for sending payments from a node /// through a network graph. 
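The extra `L: Deref` parameter (with `L::Target: Logger`) follows the crate's usual pattern of accepting anything that derefs to a Logger, so tests can pass `&TestLogger` while applications can pass an Arc. A self-contained sketch of that pattern with a stand-in Logger trait and scorer type:

use std::ops::Deref;
use std::sync::Arc;

trait Logger { fn log(&self, msg: &str); }

struct StdOutLogger;
impl Logger for StdOutLogger { fn log(&self, msg: &str) { println!("{}", msg); } }

// Mirrors the shape of ProbabilisticScorerUsingTime<G, L, T>: the scorer stores some L
// that derefs to a Logger, so callers may hand in a reference, an Arc, a Box, ...
struct Scorer<L: Deref> where L::Target: Logger {
    logger: L,
}

impl<L: Deref> Scorer<L> where L::Target: Logger {
    fn new(logger: L) -> Self { Self { logger } }
    fn score_something(&self) { self.logger.log("scoring a channel"); }
}

fn main() {
    let by_ref = Scorer::new(&StdOutLogger);
    by_ref.score_something();

    let by_arc = Scorer::new(Arc::new(StdOutLogger));
    by_arc.score_something();
}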
- pub fn new(params: ProbabilisticScoringParameters, network_graph: G) -> Self { + pub fn new(params: ProbabilisticScoringParameters, network_graph: G, logger: L) -> Self { Self { params, network_graph, + logger, channel_liquidities: HashMap::new(), } } @@ -787,22 +791,29 @@ impl, T: Time, U: Deref> DirectedChannelLiqui impl, T: Time, U: DerefMut> DirectedChannelLiquidity { /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`. - fn failed_at_channel(&mut self, amount_msat: u64) { + fn failed_at_channel(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger { if amount_msat < self.max_liquidity_msat() { + log_debug!(logger, "Setting max liquidity of {} to {}", chan_descr, amount_msat); self.set_max_liquidity_msat(amount_msat); + } else { + log_trace!(logger, "Max liquidity of {} already more than {}", chan_descr, amount_msat); } } /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream. - fn failed_downstream(&mut self, amount_msat: u64) { + fn failed_downstream(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger { if amount_msat > self.min_liquidity_msat() { + log_debug!(logger, "Setting min liquidity of {} to {}", chan_descr, amount_msat); self.set_min_liquidity_msat(amount_msat); + } else { + log_trace!(logger, "Min liquidity of {} already less than {}", chan_descr, amount_msat); } } /// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`. - fn successful(&mut self, amount_msat: u64) { + fn successful(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger { let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0); + log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat); self.set_max_liquidity_msat(max_liquidity_msat); } @@ -829,7 +840,7 @@ impl, T: Time, U: DerefMut> DirectedChanne } } -impl, T: Time> Score for ProbabilisticScorerUsingTime { +impl, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime where L::Target: Logger { fn channel_penalty_msat( &self, short_channel_id: u64, amount_msat: u64, capacity_msat: u64, source: &NodeId, target: &NodeId @@ -845,13 +856,18 @@ impl, T: Time> Score for ProbabilisticScorerUsin fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) { let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0); let liquidity_offset_half_life = self.params.liquidity_offset_half_life; + log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat); let network_graph = self.network_graph.read_only(); - for hop in path { + for (hop_idx, hop) in path.iter().enumerate() { let target = NodeId::from_pubkey(&hop.pubkey); let channel_directed_from_source = network_graph.channels() .get(&hop.short_channel_id) .and_then(|channel| channel.as_directed_to(&target)); + if hop.short_channel_id == short_channel_id && hop_idx == 0 { + log_warn!(self.logger, "Payment failed at the first hop - we do not attempt to learn channel info in such cases as we can directly observe local state.\n\tBecause we know the local state, we should generally not see failures here - this may be an indication that your channel peer on channel {} is broken and you may wish to close the channel.", hop.short_channel_id); + } + // Only score announced channels. 
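The three update paths above (failure at the channel, failure downstream of it, and success) maintain a simple per-direction liquidity range; restated here without the decay, logging, or crossing-bounds resets of the real implementation:

struct LiquidityBounds { min_msat: u64, max_msat: u64 }

impl LiquidityBounds {
    fn new(capacity_msat: u64) -> Self { Self { min_msat: 0, max_msat: capacity_msat } }
    // Payment failed *at* this channel: it could not forward amount_msat.
    fn failed_at_channel(&mut self, amount_msat: u64) {
        if amount_msat < self.max_msat { self.max_msat = amount_msat; }
    }
    // Payment failed *downstream*: this channel did forward amount_msat.
    fn failed_downstream(&mut self, amount_msat: u64) {
        if amount_msat > self.min_msat { self.min_msat = amount_msat; }
    }
    // Payment succeeded through this channel: that much liquidity is now used up.
    fn successful(&mut self, amount_msat: u64) {
        self.max_msat = self.max_msat.saturating_sub(amount_msat);
    }
}

fn main() {
    let mut bounds = LiquidityBounds::new(1_000_000);
    bounds.failed_at_channel(600_000); // upper bound drops to 600k
    bounds.failed_downstream(200_000); // lower bound rises to 200k
    bounds.successful(100_000);        // upper bound drops to 500k
    assert_eq!((bounds.min_msat, bounds.max_msat), (200_000, 500_000));
}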
if let Some((channel, source)) = channel_directed_from_source { let capacity_msat = channel.effective_capacity().as_msat(); @@ -860,7 +876,7 @@ impl, T: Time> Score for ProbabilisticScorerUsin .entry(hop.short_channel_id) .or_insert_with(ChannelLiquidity::new) .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life) - .failed_at_channel(amount_msat); + .failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger); break; } @@ -868,7 +884,10 @@ impl, T: Time> Score for ProbabilisticScorerUsin .entry(hop.short_channel_id) .or_insert_with(ChannelLiquidity::new) .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life) - .failed_downstream(amount_msat); + .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger); + } else { + log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).", + hop.short_channel_id); } } } @@ -876,6 +895,8 @@ impl, T: Time> Score for ProbabilisticScorerUsin fn payment_path_successful(&mut self, path: &[&RouteHop]) { let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0); let liquidity_offset_half_life = self.params.liquidity_offset_half_life; + log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.", + path.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat); let network_graph = self.network_graph.read_only(); for hop in path { let target = NodeId::from_pubkey(&hop.pubkey); @@ -890,7 +911,10 @@ impl, T: Time> Score for ProbabilisticScorerUsin .entry(hop.short_channel_id) .or_insert_with(ChannelLiquidity::new) .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life) - .successful(amount_msat); + .successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger); + } else { + log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).", + hop.short_channel_id); } } } @@ -1206,7 +1230,7 @@ mod approx { } } -impl, T: Time> Writeable for ProbabilisticScorerUsingTime { +impl, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime where L::Target: Logger { #[inline] fn write(&self, w: &mut W) -> Result<(), io::Error> { write_tlv_fields!(w, { @@ -1216,13 +1240,13 @@ impl, T: Time> Writeable for ProbabilisticScorer } } -impl, T: Time> -ReadableArgs<(ProbabilisticScoringParameters, G)> for ProbabilisticScorerUsingTime { +impl, L: Deref, T: Time> +ReadableArgs<(ProbabilisticScoringParameters, G, L)> for ProbabilisticScorerUsingTime where L::Target: Logger { #[inline] fn read( - r: &mut R, args: (ProbabilisticScoringParameters, G) + r: &mut R, args: (ProbabilisticScoringParameters, G, L) ) -> Result { - let (params, network_graph) = args; + let (params, network_graph, logger) = args; let mut channel_liquidities = HashMap::new(); read_tlv_fields!(r, { (0, channel_liquidities, required) @@ -1230,6 +1254,7 @@ ReadableArgs<(ProbabilisticScoringParameters, G)> for ProbabilisticScorerUsingTi Ok(Self { params, network_graph, + logger, channel_liquidities, }) } @@ -1351,6 +1376,7 @@ mod tests { use routing::network_graph::{NetworkGraph, NodeId}; use routing::router::RouteHop; use util::ser::{Readable, ReadableArgs, Writeable}; + use util::test_utils::TestLogger; use bitcoin::blockdata::constants::genesis_block; use bitcoin::hashes::Hash; @@ 
-1695,7 +1721,7 @@ mod tests { // `ProbabilisticScorer` tests /// A probabilistic scorer for testing with time that can be manually advanced. - type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph, SinceEpoch>; + type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph, &'a TestLogger, SinceEpoch>; fn sender_privkey() -> SecretKey { SecretKey::from_slice(&[41; 32]).unwrap() @@ -1821,10 +1847,11 @@ mod tests { #[test] fn liquidity_bounds_directed_from_lowest_node_id() { + let logger = TestLogger::new(); let last_updated = SinceEpoch::now(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters::default(); - let mut scorer = ProbabilisticScorer::new(params, &network_graph) + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated @@ -1895,10 +1922,11 @@ mod tests { #[test] fn resets_liquidity_upper_bound_when_crossed_by_lower_bound() { + let logger = TestLogger::new(); let last_updated = SinceEpoch::now(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters::default(); - let mut scorer = ProbabilisticScorer::new(params, &network_graph) + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated @@ -1952,10 +1980,11 @@ mod tests { #[test] fn resets_liquidity_lower_bound_when_crossed_by_upper_bound() { + let logger = TestLogger::new(); let last_updated = SinceEpoch::now(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters::default(); - let mut scorer = ProbabilisticScorer::new(params, &network_graph) + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated @@ -2009,12 +2038,13 @@ mod tests { #[test] fn increased_penalty_nearing_liquidity_upper_bound() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2034,13 +2064,14 @@ mod tests { #[test] fn constant_penalty_outside_liquidity_bounds() { + let logger = TestLogger::new(); let last_updated = SinceEpoch::now(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph) + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger) .with_channel(42, ChannelLiquidity { min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated @@ -2056,12 +2087,13 @@ mod tests { #[test] fn does_not_further_penalize_own_channel() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, 
&logger); let sender = sender_node_id(); let source = source_node_id(); let failed_path = payment_path_for_amount(500); @@ -2078,12 +2110,13 @@ mod tests { #[test] fn sets_liquidity_lower_bound_on_downstream_failure() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); let path = payment_path_for_amount(500); @@ -2101,12 +2134,13 @@ mod tests { #[test] fn sets_liquidity_upper_bound_on_failure() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); let path = payment_path_for_amount(500); @@ -2124,12 +2158,13 @@ mod tests { #[test] fn reduces_liquidity_upper_bound_along_path_on_success() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let sender = sender_node_id(); let source = source_node_id(); let target = target_node_id(); @@ -2149,13 +2184,14 @@ mod tests { #[test] fn decays_liquidity_bounds_over_time() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2201,13 +2237,14 @@ mod tests { #[test] fn decays_liquidity_bounds_without_shift_overflow() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 125); @@ -2226,13 +2263,14 @@ mod tests { #[test] fn restricts_liquidity_bounds_after_decay() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = 
target_node_id(); @@ -2264,13 +2302,14 @@ mod tests { #[test] fn restores_persisted_liquidity_bounds() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2288,19 +2327,20 @@ mod tests { let mut serialized_scorer = io::Cursor::new(&serialized_scorer); let deserialized_scorer = - ::read(&mut serialized_scorer, (params, &network_graph)).unwrap(); + ::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap(); assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 300); } #[test] fn decays_persisted_liquidity_bounds() { + let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters { liquidity_penalty_multiplier_msat: 1_000, liquidity_offset_half_life: Duration::from_secs(10), ..ProbabilisticScoringParameters::zero_penalty() }; - let mut scorer = ProbabilisticScorer::new(params, &network_graph); + let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2314,7 +2354,7 @@ mod tests { let mut serialized_scorer = io::Cursor::new(&serialized_scorer); let deserialized_scorer = - ::read(&mut serialized_scorer, (params, &network_graph)).unwrap(); + ::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap(); assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 473); scorer.payment_path_failed(&payment_path_for_amount(250).iter().collect::>(), 43); @@ -2328,9 +2368,10 @@ mod tests { fn scores_realistic_payments() { // Shows the scores of "realistic" sends of 100k sats over channels of 1-10m sats (with a // 50k sat reserve). 
+ let logger = TestLogger::new(); let network_graph = network_graph(); let params = ProbabilisticScoringParameters::default(); - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); let source = source_node_id(); let target = target_node_id(); @@ -2349,6 +2390,7 @@ mod tests { #[test] fn adds_base_penalty_to_liquidity_penalty() { + let logger = TestLogger::new(); let network_graph = network_graph(); let source = source_node_id(); let target = target_node_id(); @@ -2357,18 +2399,19 @@ mod tests { liquidity_penalty_multiplier_msat: 1_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 58); let params = ProbabilisticScoringParameters { base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, ..Default::default() }; - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 558); } #[test] fn adds_amount_penalty_to_liquidity_penalty() { + let logger = TestLogger::new(); let network_graph = network_graph(); let source = source_node_id(); let target = target_node_id(); @@ -2378,7 +2421,7 @@ mod tests { amount_penalty_multiplier_msat: 0, ..ProbabilisticScoringParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 300); let params = ProbabilisticScoringParameters { @@ -2386,12 +2429,13 @@ mod tests { amount_penalty_multiplier_msat: 256, ..ProbabilisticScoringParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 337); } #[test] fn calculates_log10_without_overflowing_u64_max_value() { + let logger = TestLogger::new(); let network_graph = network_graph(); let source = source_node_id(); let target = target_node_id(); @@ -2400,7 +2444,7 @@ mod tests { liquidity_penalty_multiplier_msat: 40_000, ..ProbabilisticScoringParameters::zero_penalty() }; - let scorer = ProbabilisticScorer::new(params, &network_graph); + let scorer = ProbabilisticScorer::new(params, &network_graph, &logger); assert_eq!( scorer.channel_penalty_msat(42, u64::max_value(), u64::max_value(), &source, &target), 80_000, diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index bd8b40b6..0f625098 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -10,6 +10,7 @@ //! Various user-configurable channel limits and settings which ChannelManager //! applies for you. +use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO; use ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; /// Configuration we set when applicable. @@ -95,11 +96,16 @@ impl Default for ChannelHandshakeConfig { /// are applied mostly only to incoming channels that's not much of a problem. 
diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs
index bd8b40b6..0f625098 100644
--- a/lightning/src/util/config.rs
+++ b/lightning/src/util/config.rs
@@ -10,6 +10,7 @@
 //! Various user-configurable channel limits and settings which ChannelManager
 //! applies for you.
 
+use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
 use ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 
 /// Configuration we set when applicable.
@@ -95,11 +96,16 @@ impl Default for ChannelHandshakeConfig {
 /// are applied mostly only to incoming channels that's not much of a problem.
 #[derive(Copy, Clone, Debug)]
 pub struct ChannelHandshakeLimits {
-    /// Minimum allowed satoshis when a channel is funded, this is supplied by the sender and so
+    /// Minimum allowed satoshis when a channel is funded. This is supplied by the sender and so
     /// only applies to inbound channels.
     ///
    /// Default value: 0.
     pub min_funding_satoshis: u64,
+    /// Maximum allowed satoshis when a channel is funded. This is supplied by the sender and so
+    /// only applies to inbound channels.
+    ///
+    /// Default value: 2^24 - 1.
+    pub max_funding_satoshis: u64,
     /// The remote node sets a limit on the minimum size of HTLCs we can send to them. This allows
     /// you to limit the maximum minimum-size they can require.
     ///
@@ -151,6 +157,7 @@ impl Default for ChannelHandshakeLimits {
     fn default() -> Self {
         ChannelHandshakeLimits {
             min_funding_satoshis: 0,
+            max_funding_satoshis: MAX_FUNDING_SATOSHIS_NO_WUMBO,
             max_htlc_minimum_msat: <u64>::max_value(),
             min_max_htlc_value_in_flight_msat: 0,
             max_channel_reserve_satoshis: <u64>::max_value(),
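
For downstream users the new limit is opt-out rather than opt-in: it defaults to the pre-wumbo maximum. As a configuration sketch (not part of the patch), tightening it further might look like the following; the `UserConfig` field name `peer_channel_config_limits` is assumed to match this release:

    use lightning::util::config::{ChannelHandshakeLimits, UserConfig};

    // Sketch only: refuse inbound channels funded with more than 0.01 BTC. All other limits,
    // including max_funding_satoshis' own default of MAX_FUNDING_SATOSHIS_NO_WUMBO (2^24 - 1
    // satoshis), are left at their defaults.
    let mut config = UserConfig::default();
    config.peer_channel_config_limits = ChannelHandshakeLimits {
        max_funding_satoshis: 1_000_000, // 0.01 BTC
        ..Default::default()
    };
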
+ path: Vec, + }, /// Indicates an outbound HTLC we sent failed. Probably some intermediary node dropped /// something. You may wish to retry with a different route. /// @@ -299,27 +340,6 @@ pub enum Event { #[cfg(test)] error_data: Option>, }, - /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events - /// provide failure information for each MPP part in the payment. - /// - /// This event is provided once there are no further pending HTLCs for the payment and the - /// payment is no longer retryable, either due to a several-block timeout or because - /// [`ChannelManager::abandon_payment`] was previously called for the corresponding payment. - /// - /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment - PaymentFailed { - /// The id returned by [`ChannelManager::send_payment`] and used with - /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`]. - /// - /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment - /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment - /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment - payment_id: PaymentId, - /// The hash that was given to [`ChannelManager::send_payment`]. - /// - /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment - payment_hash: PaymentHash, - }, /// Used to indicate that [`ChannelManager::process_pending_htlc_forwards`] should be called at /// a time in the future. /// @@ -390,26 +410,6 @@ pub enum Event { /// The full transaction received from the user transaction: Transaction }, - /// Indicates that a path for an outbound payment was successful. - /// - /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See - /// [`Event::PaymentSent`] for obtaining the payment preimage. - PaymentPathSuccessful { - /// The id returned by [`ChannelManager::send_payment`] and used with - /// [`ChannelManager::retry_payment`]. - /// - /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment - /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment - payment_id: PaymentId, - /// The hash that was given to [`ChannelManager::send_payment`]. - /// - /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment - payment_hash: Option, - /// The payment path that was successful. - /// - /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain. - path: Vec, - }, /// Indicates a request to open a new channel by a peer. /// /// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the