From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com> Date: Tue, 10 Aug 2021 22:11:18 +0000 (+0000) Subject: Merge pull request #1009 from ariard/2021-07-add-forward-dust-limit X-Git-Tag: v0.0.100~4 X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=9d8d24f6906d6fbdc6c02a88e5e2298c1fa50825;hp=d4b6f58ba67a40d49a58722a104d2d3ea53a0410;p=rust-lightning Merge pull request #1009 from ariard/2021-07-add-forward-dust-limit Add new config setting `max_balance_dust_htlc_msat` --- diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index ef9ce82d..4ce0df09 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -247,6 +247,7 @@ fn check_api_err(api_err: APIError) { _ if err.starts_with("Cannot send value that would put counterparty balance under holder-announced channel reserve value") => {}, _ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {}, _ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {}, + _ if err.starts_with("Cannot send value that would put our exposure to dust HTLCs at") => {}, _ => panic!("{}", err), } }, diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b678f0f4..e685c15d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -275,6 +275,14 @@ enum HTLCInitiator { RemoteOffered, } +/// An enum gathering stats on pending HTLCs, either inbound or outbound side. +struct HTLCStats { + pending_htlcs: u32, + pending_htlcs_value_msat: u64, + on_counterparty_tx_dust_exposure_msat: u64, + on_holder_tx_dust_exposure_msat: u64, +} + /// Used when calculating whether we or the remote can afford an additional HTLC. struct HTLCCandidate { amount_msat: u64, @@ -1842,32 +1850,63 @@ impl Channel { Ok(()) } - /// Returns (inbound_htlc_count, htlc_inbound_value_msat) - fn get_inbound_pending_htlc_stats(&self) -> (u32, u64) { - let mut htlc_inbound_value_msat = 0; + /// Returns a HTLCStats about inbound pending htlcs + fn get_inbound_pending_htlc_stats(&self) -> HTLCStats { + let mut stats = HTLCStats { + pending_htlcs: self.pending_inbound_htlcs.len() as u32, + pending_htlcs_value_msat: 0, + on_counterparty_tx_dust_exposure_msat: 0, + on_holder_tx_dust_exposure_msat: 0, + }; + + let counterparty_dust_limit_timeout_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; for ref htlc in self.pending_inbound_htlcs.iter() { - htlc_inbound_value_msat += htlc.amount_msat; + stats.pending_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { + stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } + if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { + stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } } - (self.pending_inbound_htlcs.len() as u32, htlc_inbound_value_msat) + stats } - /// Returns (outbound_htlc_count, htlc_outbound_value_msat) *including* pending adds in our - /// holding cell. - fn get_outbound_pending_htlc_stats(&self) -> (u32, u64) { - let mut htlc_outbound_value_msat = 0; + /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. 
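// [Editor's note] A minimal, self-contained sketch of the dust classification the new
// HTLCStats accounting performs. It is not part of the patch; `is_dust`, its parameters and
// the concrete numbers in the test below are illustrative assumptions -- the real code reads
// the second-stage weights, dust limits and buffer feerate from channel state.
fn is_dust(amount_msat: u64, dust_buffer_feerate_per_kw: u64, second_stage_tx_weight: u64,
           dust_limit_satoshis: u64) -> bool {
    // An HTLC is "dust" on a commitment transaction when its value does not cover the dust
    // limit plus the fee of its second-stage (HTLC-success or HTLC-timeout) transaction at
    // the buffered feerate, i.e. it would simply be burned to fees on a force-close.
    let threshold_sat =
        dust_buffer_feerate_per_kw * second_stage_tx_weight / 1000 + dust_limit_satoshis;
    amount_msat / 1000 < threshold_sat
}

#[test]
fn dust_threshold_example() {
    // Assumed inputs: 2530 sat/kWU buffer feerate, 663 WU HTLC-timeout weight, 546 sat dust
    // limit. Threshold = 2530 * 663 / 1000 + 546 = 2223 sat, so a 2_000_000 msat HTLC is dust
    // while a 3_000_000 msat HTLC is not.
    assert!(is_dust(2_000_000, 2530, 663, 546));
    assert!(!is_dust(3_000_000, 2530, 663, 546));
}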
+ fn get_outbound_pending_htlc_stats(&self) -> HTLCStats { + let mut stats = HTLCStats { + pending_htlcs: self.pending_outbound_htlcs.len() as u32, + pending_htlcs_value_msat: 0, + on_counterparty_tx_dust_exposure_msat: 0, + on_holder_tx_dust_exposure_msat: 0, + }; + + let counterparty_dust_limit_success_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; for ref htlc in self.pending_outbound_htlcs.iter() { - htlc_outbound_value_msat += htlc.amount_msat; + stats.pending_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { + stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } + if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { + stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } } - let mut htlc_outbound_count = self.pending_outbound_htlcs.len(); for update in self.holding_cell_htlc_updates.iter() { if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update { - htlc_outbound_count += 1; - htlc_outbound_value_msat += amount_msat; + stats.pending_htlcs += 1; + stats.pending_htlcs_value_msat += amount_msat; + if *amount_msat / 1000 < counterparty_dust_limit_success_sat { + stats.on_counterparty_tx_dust_exposure_msat += amount_msat; + } + if *amount_msat / 1000 < holder_dust_limit_timeout_sat { + stats.on_holder_tx_dust_exposure_msat += amount_msat; + } } } - - (htlc_outbound_count as u32, htlc_outbound_value_msat) + stats } /// Get the available (ie not including pending HTLCs) inbound and outbound balance in msat. @@ -1879,11 +1918,11 @@ impl Channel { ( cmp::max(self.channel_value_satoshis as i64 * 1000 - self.value_to_self_msat as i64 - - self.get_inbound_pending_htlc_stats().1 as i64 + - self.get_inbound_pending_htlc_stats().pending_htlcs_value_msat as i64 - Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) as i64 * 1000, 0) as u64, cmp::max(self.value_to_self_msat as i64 - - self.get_outbound_pending_htlc_stats().1 as i64 + - self.get_outbound_pending_htlc_stats().pending_htlcs_value_msat as i64 - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000, 0) as u64 ) @@ -2095,12 +2134,13 @@ impl Channel { return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). 
Actual: ({})", self.holder_htlc_minimum_msat, msg.amount_msat))); } - let (inbound_htlc_count, htlc_inbound_value_msat) = self.get_inbound_pending_htlc_stats(); - if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 { + let inbound_stats = self.get_inbound_pending_htlc_stats(); + let outbound_stats = self.get_outbound_pending_htlc_stats(); + if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 { return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS))); } let holder_max_htlc_value_in_flight_msat = Channel::::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis); - if htlc_inbound_value_msat + msg.amount_msat > holder_max_htlc_value_in_flight_msat { + if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > holder_max_htlc_value_in_flight_msat { return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", holder_max_htlc_value_in_flight_msat))); } // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet @@ -2124,8 +2164,28 @@ impl Channel { } } + let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats { + let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat; + if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()); + pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); + } + } + + let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; + if msg.amount_msat / 1000 < exposure_dust_limit_success_sats { + let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat; + if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { + log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", + on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()); + pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); + } + } + let pending_value_to_self_msat = - self.value_to_self_msat + htlc_inbound_value_msat - removed_outbound_total_msat; + self.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat; let pending_remote_value_msat = self.channel_value_satoshis * 1000 - pending_value_to_self_msat; if pending_remote_value_msat < msg.amount_msat { @@ -3558,11 +3618,24 @@ impl Channel { cmp::max(self.config.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA) } + pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 { + self.config.max_dust_htlc_exposure_msat + } + #[cfg(test)] pub fn get_feerate(&self) -> u32 { self.feerate_per_kw } + pub fn get_dust_buffer_feerate(&self) -> u32 { + // When calculating our exposure to dust HTLCs, we assume that the channel feerate + // may, at any point, increase by at least 10 sat/vB (i.e 
2530 sat/kWU) or 25%, + // whichever is higher. This ensures that we aren't suddenly exposed to significantly + // more dust balance if the feerate increases when we have several HTLCs pending + // which are near the dust limit. + cmp::max(2530, self.feerate_per_kw * 1250 / 1000) + } + pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { self.cur_holder_commitment_transaction_number + 1 } @@ -4207,12 +4280,13 @@ impl Channel { return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); } - let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats(); - if outbound_htlc_count + 1 > self.counterparty_max_accepted_htlcs as u32 { + let inbound_stats = self.get_inbound_pending_htlc_stats(); + let outbound_stats = self.get_outbound_pending_htlc_stats(); + if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 { return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.counterparty_max_accepted_htlcs))); } // Check their_max_htlc_value_in_flight_msat - if htlc_outbound_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat { + if outbound_stats.pending_htlcs_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat { return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat))); } @@ -4227,7 +4301,25 @@ impl Channel { } } - let pending_value_to_self_msat = self.value_to_self_msat - htlc_outbound_value_msat; + let exposure_dust_limit_success_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis; + if amount_msat / 1000 < exposure_dust_limit_success_sats { + let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + amount_msat; + if on_counterparty_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { + return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + on_counterparty_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()))); + } + } + + let exposure_dust_limit_timeout_sats = (self.get_dust_buffer_feerate() as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis; + if amount_msat / 1000 < exposure_dust_limit_timeout_sats { + let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + amount_msat; + if on_holder_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { + return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", + on_holder_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()))); + } + } + + let pending_value_to_self_msat = self.value_to_self_msat - outbound_stats.pending_htlcs_value_msat; if pending_value_to_self_msat < amount_msat { return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. 
Amount: {}, pending value to self {}", amount_msat, pending_value_to_self_msat))); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 79f8562c..6c08065b 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1394,6 +1394,9 @@ pub fn test_default_channel_config() -> UserConfig { // When most of our tests were written, the default HTLC minimum was fixed at 1000. // It now defaults to 1, so we simply set it to the expected value here. default_config.own_channel_config.our_htlc_minimum_msat = 1000; + // When most of our tests were written, we didn't have the notion of a `max_dust_htlc_exposure_msat`, + // It now defaults to 5_000_000 msat; to avoid interfering with tests we bump it to 50_000_000 msat. + default_config.channel_options.max_dust_htlc_exposure_msat = 50_000_000; default_config } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 1d149b83..754ef563 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9764,3 +9764,116 @@ fn test_keysend_payments_to_private_node() { pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage)); claim_payment(&nodes[0], &path, test_preimage); } + +fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, on_holder_tx: bool) { + // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` policy. + // + // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and + // trimmed-to-dust HTLC outbound balance and this new payment as included on next counterparty + // commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the update. + // At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC inbound + // and trimmed-to-dust HTLC outbound balance and this new received HTLC as included on next + // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail the update. + // Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel might be + // available again for HTLC processing once the dust bandwidth has cleared up. 
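// [Editor's note] A minimal sketch (not part of the patch) of the exposure rule this test
// exercises, assuming the per-direction dust totals have already been computed as in
// get_inbound_pending_htlc_stats()/get_outbound_pending_htlc_stats(). The helper name and the
// test are illustrative, not the library's API; in the real code the failure paths are
// ChannelError::Ignore on send and a temporary_channel_failure (0x1000|7) fail-back on receive.
fn would_exceed_dust_exposure(inbound_dust_msat: u64, outbound_dust_msat: u64,
                              new_dust_htlc_msat: u64, max_dust_htlc_exposure_msat: u64) -> bool {
    // The candidate HTLC only counts if it is itself trimmed-to-dust on the commitment
    // transaction in question; this sketch assumes the caller already established that.
    inbound_dust_msat + outbound_dust_msat + new_dust_htlc_msat > max_dust_htlc_exposure_msat
}

#[test]
fn dust_exposure_example() {
    // With the default 5_000_000 msat limit, the 25 pending 200_000 msat dust HTLCs this test
    // sends already saturate the limit, so one more 200_000 msat dust HTLC (total 5_200_000)
    // is rejected -- the counterparty-commitment case asserted below.
    assert!(would_exceed_dust_exposure(0, 5_000_000, 200_000, 5_000_000));
    // Exposure exactly at the limit is still allowed (the check is a strict '>').
    assert!(!would_exceed_dust_exposure(0, 4_800_000, 200_000, 5_000_000));
}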
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let mut config = test_default_channel_config(); + config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.max_htlc_value_in_flight_msat = 50_000_000; + open_channel.max_accepted_htlcs = 60; + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + if on_holder_tx { + accept_channel.dust_limit_satoshis = 660; + } + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42); + + if on_holder_tx { + if let Some(mut chan) = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) { + chan.holder_dust_limit_satoshis = 660; + } + } + + nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap(); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); + check_added_monitors!(nodes[0], 1); + + let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked); + update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + + if on_holder_tx { + if dust_outbound_balance { + for i in 0..2 { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 2_300_000); + if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + } + } else { + for _ in 0..2 { + route_payment(&nodes[0], &[&nodes[1]], 2_300_000); + } + } + } else { + if dust_outbound_balance { + for i in 0..25 { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 200_000); // + 177_000 msat of HTLC-success tx at 253 sats/kWU + if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + } + } else { + for _ in 0..25 { + route_payment(&nodes[0], &[&nodes[1]], 200_000); // + 167_000 msat of HTLC-timeout tx at 253 sats/kWU + } + } + } + + if at_forward { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { 2_300_000 } else { 200_000 }); + let mut config = UserConfig::default(); + if on_holder_tx { + unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, 
&format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat))); + } else { + unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat))); + } + } else { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1 ], if on_holder_tx { 2_300_000 } else { 200_000 }); + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + if on_holder_tx { + nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat), 1); + } else { + nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat), 1); + } + } + + let _ = nodes[1].node.get_and_clear_pending_msg_events(); + let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); + added_monitors.clear(); +} + +#[test] +fn test_max_dust_htlc_exposure() { + do_test_max_dust_htlc_exposure(true, true, true); + do_test_max_dust_htlc_exposure(false, true, true); + do_test_max_dust_htlc_exposure(false, false, true); + do_test_max_dust_htlc_exposure(false, false, false); + do_test_max_dust_htlc_exposure(true, true, false); + do_test_max_dust_htlc_exposure(true, false, false); + do_test_max_dust_htlc_exposure(true, false, true); + do_test_max_dust_htlc_exposure(false, true, false); +} diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index 2e9e8e03..3348b7b2 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -206,7 +206,23 @@ pub struct ChannelConfig { /// This cannot be changed after a channel has been initialized. /// /// Default value: true. - pub commit_upfront_shutdown_pubkey: bool + pub commit_upfront_shutdown_pubkey: bool, + /// Limit our total exposure to in-flight HTLCs which are burned to fees as they are too + /// small to claim on-chain. + /// + /// When an HTLC present in one of our channels is below a "dust" threshold, the HTLC will + /// not be claimable on-chain, instead being turned into additional miner fees if either + /// party force-closes the channel. Because the threshold is per-HTLC, our total exposure + /// to such payments may be sustantial if there are many dust HTLCs present when the + /// channel is force-closed. + /// + /// This limit is applied for sent, forwarded, and received HTLCs and limits the total + /// exposure across all three types per-channel. 
Setting this too low may prevent the + /// sending or receipt of low-value HTLCs on high-traffic nodes, and this limit is very + /// important to prevent stealing of dust HTLCs by miners. + /// + /// Default value: 5_000_000 msat. + pub max_dust_htlc_exposure_msat: u64, } impl Default for ChannelConfig { @@ -218,12 +234,14 @@ impl Default for ChannelConfig { cltv_expiry_delta: 6 * 12, // 6 blocks/hour * 12 hours announced_channel: false, commit_upfront_shutdown_pubkey: true, + max_dust_htlc_exposure_msat: 5_000_000, } } } impl_writeable_tlv_based!(ChannelConfig, { (0, forwarding_fee_proportional_millionths, required), + (1, max_dust_htlc_exposure_msat, (default_value, 5_000_000)), (2, cltv_expiry_delta, required), (4, announced_channel, required), (6, commit_upfront_shutdown_pubkey, required), diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index 5d5171ad..960fae45 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -8,6 +8,9 @@ // licenses. macro_rules! encode_tlv { + ($stream: expr, $type: expr, $field: expr, (default_value, $default: expr)) => { + encode_tlv!($stream, $type, $field, required) + }; ($stream: expr, $type: expr, $field: expr, required) => { BigSize($type).write($stream)?; BigSize($field.serialized_length() as u64).write($stream)?; @@ -26,7 +29,7 @@ macro_rules! encode_tlv { } macro_rules! encode_tlv_stream { - ($stream: expr, {$(($type: expr, $field: expr, $fieldty: ident)),*}) => { { + ($stream: expr, {$(($type: expr, $field: expr, $fieldty: tt)),* $(,)*}) => { { #[allow(unused_imports)] use { ln::msgs::DecodeError, @@ -53,6 +56,9 @@ macro_rules! encode_tlv_stream { } macro_rules! get_varint_length_prefixed_tlv_length { + ($len: expr, $type: expr, $field: expr, (default_value, $default: expr)) => { + get_varint_length_prefixed_tlv_length!($len, $type, $field, required) + }; ($len: expr, $type: expr, $field: expr, required) => { BigSize($type).write(&mut $len).expect("No in-memory data may fail to serialize"); let field_len = $field.serialized_length(); @@ -73,7 +79,7 @@ macro_rules! get_varint_length_prefixed_tlv_length { } macro_rules! encode_varint_length_prefixed_tlv { - ($stream: expr, {$(($type: expr, $field: expr, $fieldty: ident)),*}) => { { + ($stream: expr, {$(($type: expr, $field: expr, $fieldty: tt)),*}) => { { use util::ser::BigSize; let len = { #[allow(unused_mut)] @@ -89,38 +95,55 @@ macro_rules! encode_varint_length_prefixed_tlv { } macro_rules! 
check_tlv_order { - ($last_seen_type: expr, $typ: expr, $type: expr, required) => {{ + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (default_value, $default: expr)) => {{ + #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true + let invalid_order = ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type; + if invalid_order { + $field = $default; + } + }}; + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, required) => {{ #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true let invalid_order = ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type; if invalid_order { return Err(DecodeError::InvalidValue); } }}; - ($last_seen_type: expr, $typ: expr, $type: expr, option) => {{ + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, option) => {{ // no-op }}; - ($last_seen_type: expr, $typ: expr, $type: expr, vec_type) => {{ + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, vec_type) => {{ // no-op }}; } macro_rules! check_missing_tlv { - ($last_seen_type: expr, $type: expr, required) => {{ + ($last_seen_type: expr, $type: expr, $field: ident, (default_value, $default: expr)) => {{ + #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true + let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type; + if missing_req_type { + $field = $default; + } + }}; + ($last_seen_type: expr, $type: expr, $field: ident, required) => {{ #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type; if missing_req_type { return Err(DecodeError::InvalidValue); } }}; - ($last_seen_type: expr, $type: expr, vec_type) => {{ + ($last_seen_type: expr, $type: expr, $field: ident, vec_type) => {{ // no-op }}; - ($last_seen_type: expr, $type: expr, option) => {{ + ($last_seen_type: expr, $type: expr, $field: ident, option) => {{ // no-op }}; } macro_rules! decode_tlv { + ($reader: expr, $field: ident, (default_value, $default: expr)) => {{ + decode_tlv!($reader, $field, required) + }}; ($reader: expr, $field: ident, required) => {{ $field = ser::Readable::read(&mut $reader)?; }}; @@ -133,7 +156,7 @@ macro_rules! decode_tlv { } macro_rules! decode_tlv_stream { - ($stream: expr, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => { { + ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { { use ln::msgs::DecodeError; let mut last_seen_type: Option = None; 'tlv_read: loop { @@ -168,7 +191,7 @@ macro_rules! decode_tlv_stream { } // As we read types, make sure we hit every required type: $({ - check_tlv_order!(last_seen_type, typ, $type, $fieldty); + check_tlv_order!(last_seen_type, typ, $type, $field, $fieldty); })* last_seen_type = Some(typ.0); @@ -192,7 +215,7 @@ macro_rules! decode_tlv_stream { } // Make sure we got to each required type after we've read every TLV: $({ - check_missing_tlv!(last_seen_type, $type, $fieldty); + check_missing_tlv!(last_seen_type, $type, $field, $fieldty); })* } } } @@ -326,7 +349,7 @@ macro_rules! write_ver_prefix { /// This is the preferred method of adding new fields that old nodes can ignore and still function /// correctly. macro_rules! 
write_tlv_fields { - ($stream: expr, {$(($type: expr, $field: expr, $fieldty: ident)),* $(,)*}) => { + ($stream: expr, {$(($type: expr, $field: expr, $fieldty: tt)),* $(,)*}) => { encode_varint_length_prefixed_tlv!($stream, {$(($type, $field, $fieldty)),*}); } } @@ -347,7 +370,7 @@ macro_rules! read_ver_prefix { /// Reads a suffix added by write_tlv_fields. macro_rules! read_tlv_fields { - ($stream: expr, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => { { + ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { { let tlv_len = ::util::ser::BigSize::read($stream)?; let mut rd = ::util::ser::FixedLengthReader::new($stream, tlv_len.0); decode_tlv_stream!(&mut rd, {$(($type, $field, $fieldty)),*}); @@ -356,6 +379,9 @@ macro_rules! read_tlv_fields { } macro_rules! init_tlv_based_struct_field { + ($field: ident, (default_value, $default: expr)) => { + $field + }; ($field: ident, option) => { $field }; @@ -368,6 +394,9 @@ macro_rules! init_tlv_based_struct_field { } macro_rules! init_tlv_field_var { + ($field: ident, (default_value, $default: expr)) => { + let mut $field = $default; + }; ($field: ident, required) => { let mut $field = ::util::ser::OptionDeserWrapper(None); }; @@ -385,7 +414,7 @@ macro_rules! init_tlv_field_var { /// if $fieldty is `vec_type`, then $field is a Vec, which needs to have its individual elements /// serialized. macro_rules! impl_writeable_tlv_based { - ($st: ident, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => { + ($st: ident, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { impl ::util::ser::Writeable for $st { fn write(&self, writer: &mut W) -> Result<(), $crate::io::Error> { write_tlv_fields!(writer, { @@ -441,7 +470,7 @@ macro_rules! impl_writeable_tlv_based { /// Attempts to read an unknown type byte result in DecodeError::UnknownRequiredFeature. macro_rules! impl_writeable_tlv_based_enum { ($st: ident, $(($variant_id: expr, $variant_name: ident) => - {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*} + {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*} ),* $(,)*; $(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*) => { impl ::util::ser::Writeable for $st {
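// [Editor's note] The sketch below (not part of the patch) shows the read-side semantics the
// new `(default_value, $default)` TLV field type introduces, without going through the macros:
// on write the field is encoded exactly like a `required` field, while on read a missing TLV
// no longer aborts with DecodeError::InvalidValue -- the field just takes the supplied default.
// This is what lets `max_dust_htlc_exposure_msat` be added to ChannelConfig at TLV type 1
// while configs serialized by older versions (which lack type 1) still deserialize, coming
// back as 5_000_000 msat. The helper and test are illustrative stand-ins, not the crate's macros.
fn u64_tlv_or_default(value_if_present: Option<u64>, default: u64) -> u64 {
    // Stand-in for the (default_value, ...) arms of check_missing_tlv!/init_tlv_field_var!:
    // an absent field falls back to the default instead of producing a decode error.
    value_if_present.unwrap_or(default)
}

#[test]
fn default_value_tlv_example() {
    // An old serialization that carries no TLV type 1 yields the default...
    assert_eq!(u64_tlv_or_default(None, 5_000_000), 5_000_000);
    // ...while a new serialization carrying type 1 (e.g. the 50_000_000 msat test override)
    // round-trips the stored value.
    assert_eq!(u64_tlv_or_default(Some(50_000_000), 5_000_000), 50_000_000);
}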