+
+#[test]
+fn test_double_partial_claim() {
+ // Test what happens if a node receives a payment, generates a PaymentReceived event, the HTLCs
+ // time out, the sender resends only some of the MPP parts, then the user processes the
+ // PaymentReceived event, ensuring they don't inadvertently claim only part of the full payment
+ // amount.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
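+ // The network is a diamond: nodes[0] can reach nodes[3] via nodes[1] or via nodes[2], giving
+ // the router two candidate paths for the MPP payment below.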
+
+ let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+ assert_eq!(route.paths.len(), 2);
+ route.paths.sort_by(|path_a, _| {
+ // Sort the paths so that the path through nodes[1] comes first
+ if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+ core::cmp::Ordering::Less
+ } else {
+ core::cmp::Ordering::Greater
+ }
+ });
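+ // (Inspecting only `path_a` suffices: with exactly two paths, as asserted above, pushing
+ // the nodes[1] path towards `Less` deterministically orders the pair.)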
+
+ send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
+ // nodes[3] has now received a PaymentReceived event...which it will take some (exorbitant)
+ // amount of time to respond to.
+
+ // Connect some blocks to time out the payment
+ connect_blocks(&nodes[3], TEST_FINAL_CLTV);
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
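+ // (Keeping nodes[0] at the same height means the original route's CLTV values remain valid
+ // when the payment is retried below.)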
+
+ expect_pending_htlcs_forwardable!(nodes[3]);
+
+ pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
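+ // Both paths have now been failed back to nodes[0], which is free to retry with the same
+ // payment hash and secret.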
+
+ // nodes[0] now retries one of the two paths...
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+ // At this point nodes[3] has received one half of the payment, and the user goes to handle
+ // that PaymentReceived event they got hours ago and never handled...we should refuse to claim.
+ nodes[3].node.claim_funds(payment_preimage);
+ check_added_monitors!(nodes[3], 0);
+ assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
+}
+
+fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
+ // Test what happens if a node receives an MPP payment, claims it, but crashes before
+ // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
+ // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
+ // have the PaymentReceived event, (b) have one (or two) channel(s) that go on chain with the
+ // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
+ // not have the preimage tied to the still-pending HTLC.
+ //
+ // To get to the correct state, on startup we should propagate the preimage to the
+ // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor
+ // receiving the preimage without a state update.
+ //
+ // Further, we should generate a `PaymentClaimed` event to inform the user that the payment was
+ // definitely claimed.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_3_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+ let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+
+ // Create an MPP route for 15k sats, more than the default htlc-max of 10%
+ let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+ assert_eq!(route.paths.len(), 2);
+ route.paths.sort_by(|path_a, _| {
+ // Sort the paths so that the path through nodes[1] comes first
+ if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+ core::cmp::Ordering::Less
+ } else {
+ core::cmp::Ordering::Greater
+ }
+ });
+
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+ let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(send_events.len(), 2);
+ do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
+ do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+
+ // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
+ // ChannelManager and, if we aren't going to persist both monitors, of the monitor we won't
+ // be persisting, for use later.
+ let mut original_monitor = test_utils::TestVecWriter(Vec::new());
+ if !persist_both_monitors {
+ for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if outpoint.to_channel_id() == chan_id_not_persisted {
+ assert!(original_monitor.0.is_empty());
+ nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+ }
+ }
+ }
+
+ let mut original_manager = test_utils::TestVecWriter(Vec::new());
+ nodes[3].node.write(&mut original_manager).unwrap();
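+ // Note that this snapshot predates the claim below: it still contains the pending
+ // PaymentReceived event and the unresolved HTLCs, simulating a crash before the post-claim
+ // ChannelManager could be persisted.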
+
+ expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+
+ nodes[3].node.claim_funds(payment_preimage);
+ check_added_monitors!(nodes[3], 2);
+ expect_payment_claimed!(nodes[3], payment_hash, 15_000_000);
+
+ // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
+ // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
+ // with the old ChannelManager.
+ let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
+ for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if outpoint.to_channel_id() == chan_id_persisted {
+ assert!(updated_monitor.0.is_empty());
+ nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
+ }
+ }
+ // If `persist_both_monitors` is set, get the second monitor here as well
+ if persist_both_monitors {
+ for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if outpoint.to_channel_id() == chan_id_not_persisted {
+ assert!(original_monitor.0.is_empty());
+ nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+ }
+ }
+ }
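+ // (With `persist_both_monitors` set, `original_monitor` is a slight misnomer: it was also
+ // serialized after the claim, so both restored monitors will contain the payment preimage.)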
+
+ // Now restart nodes[3].
+ persister = test_utils::TestPersister::new();
+ let keys_manager = &chanmon_cfgs[3].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[3].chain_source), nodes[3].tx_broadcaster.clone(), nodes[3].logger, node_cfgs[3].fee_estimator, &persister, keys_manager);
+ nodes[3].chain_monitor = &new_chain_monitor;
+ let mut monitors = Vec::new();
+ for monitor_data in [original_monitor, updated_monitor].iter() {
+ let (_, mut deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut &monitor_data.0[..], keys_manager).unwrap();
+ monitors.push(deserialized_monitor);
+ }
+
+ let config = UserConfig::default();
+ nodes_3_deserialized = {
+ let mut channel_monitors = HashMap::new();
+ for monitor in monitors.iter_mut() {
+ channel_monitors.insert(monitor.get_funding_txo().0, monitor);
+ }
+ <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut &original_manager.0[..], ChannelManagerReadArgs {
+ default_config: config,
+ keys_manager,
+ fee_estimator: node_cfgs[3].fee_estimator,
+ chain_monitor: nodes[3].chain_monitor,
+ tx_broadcaster: nodes[3].tx_broadcaster.clone(),
+ logger: nodes[3].logger,
+ channel_monitors,
+ }).unwrap().1
+ };
+ nodes[3].node = &nodes_3_deserialized;
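+
+ // nodes[3] is now running the pre-claim ChannelManager against post-claim ChannelMonitor
+ // state (for at least one channel), exactly the split a crash between the two persistence
+ // calls would leave on disk.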
+
+ for monitor in monitors {
+ // On startup the preimage should have been copied into the non-persisted monitor:
+ assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
+ nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+ }
+ check_added_monitors!(nodes[3], 2);
+
+ nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+
+ // During deserialization, we should have closed one channel (both, if `persist_both_monitors`
+ // is set) and broadcast its latest commitment transaction. We should also still have the
+ // original PaymentReceived event we never finished processing.
+ let events = nodes[3].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
+ if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
+ if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
+ if persist_both_monitors {
+ if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+ }
+
+ // On restart, we should also get a duplicate PaymentClaimed event as we persisted the
+ // ChannelManager prior to handling the original one.
+ if let Event::PaymentClaimed { payment_hash: our_payment_hash, amount_msat: 15_000_000, .. } =
+ events[if persist_both_monitors { 3 } else { 2 }]
+ {
+ assert_eq!(payment_hash, our_payment_hash);
+ } else { panic!(); }
+
+ assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
+ if !persist_both_monitors {
+ // If one of the two channels is still live, reveal the payment preimage over it.
+
+ nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
+ nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
+
+ nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
+ get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
+ assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
+
+ // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
+ // claim should fly.
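+ // (Per the node-naming convention in these tests, `ds_msgs` are messages from nodes[3],
+ // "D", and `cs_updates` below are updates from nodes[2], "C".)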
+ let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[3], 1);
+ assert_eq!(ds_msgs.len(), 2);
+ if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+
+ let cs_updates = match ds_msgs[0] {
+ MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+ nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ check_added_monitors!(nodes[2], 1);
+ let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+ expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+ commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+ cs_updates
+ }
+ _ => panic!(),
+ };
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
+ expect_payment_sent!(nodes[0], payment_preimage);
+ }
+}
+
+#[test]
+fn test_partial_claim_before_restart() {
+ do_test_partial_claim_before_restart(false);
+ do_test_partial_claim_before_restart(true);
+}
+
+/// The possible events which may trigger a `max_dust_htlc_exposure` breach
+#[derive(Clone, Copy, PartialEq)]
+enum ExposureEvent {
+ /// Breach occurs at HTLC forwarding (see `send_htlc`)
+ AtHTLCForward,
+ /// Breach occurs at HTLC reception (see `update_add_htlc`)
+ AtHTLCReception,
+ /// Breach occurs at outbound update_fee (see `send_update_fee`)
+ AtUpdateFeeOutbound,
+}
+
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) {
+ // Test that we properly reject dust HTLCs which would violate our
+ // `max_dust_htlc_exposure_msat` policy.
+ //
+ // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust inbound and outbound
+ // HTLC balances plus this new payment, as included on the next counterparty commitment, is
+ // above our `max_dust_htlc_exposure_msat`, we'll reject the update. At HTLC reception
+ // (`update_add_htlc()`), if the sum of the trimmed-to-dust inbound and outbound HTLC
+ // balances plus this newly-received HTLC, as included on the next counterparty commitment, is
+ // above our `max_dust_htlc_exposure_msat`, we'll fail the update. Note that we return a
+ // `temporary_channel_failure` (0x1000 | 7), as the channel might become available again for
+ // HTLC processing once the dust bandwidth has cleared up.
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut config = test_default_channel_config();
+ config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+ let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ open_channel.max_htlc_value_in_flight_msat = 50_000_000;
+ open_channel.max_accepted_htlcs = 60;
+ if on_holder_tx {
+ open_channel.dust_limit_satoshis = 546;
+ }
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+ let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+ let opt_anchors = false;
+
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+ if on_holder_tx {
+ if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
+ chan.holder_dust_limit_satoshis = 546;
+ }
+ }
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ check_added_monitors!(nodes[0], 1);
+
+ let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
+ update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
+
+ let dust_buffer_feerate = {
+ let chan_lock = nodes[0].node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&channel_id).unwrap();
+ chan.get_dust_buffer_feerate(None) as u64
+ };
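+ // `get_dust_buffer_feerate` pads the current feerate against future increases; with the
+ // test-default feerate of 253 sat/kw this evaluates to its 2530 sat/kw floor, the rate the
+ // dust-threshold figures in the comments below are computed from.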
+ let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+ let dust_outbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
+
+ let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+ let dust_inbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+
+ let dust_htlc_on_counterparty_tx: u64 = 25;
+ let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
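+ // Naming note: the `_msat` values are per-HTLC amounts sized to be trimmed to dust on the
+ // relevant commitment transaction, while their non-`_msat` counterparts are the number of
+ // such HTLCs which fit under the 5_000_000 msat `max_dust_htlc_exposure_msat` limit.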
+
+ if on_holder_tx {
+ if dust_outbound_balance {
+ // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
+ // Outbound dust balance: 4372 sats
+ // Note, we need the sent payments to be above the outbound dust threshold on the
+ // counterparty tx (2132 sats) so they only count towards holder-tx dust exposure
+ for i in 0..dust_outbound_htlc_on_holder_tx {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
+ if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
+ }
+ } else {
+ // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
+ // Inbound dust balance: 4372 sats
+ // Note, we need the sent payments to be above the inbound dust threshold on the
+ // counterparty tx (2031 sats) so they only count towards holder-tx dust exposure
+ for _ in 0..dust_inbound_htlc_on_holder_tx {
+ route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
+ }
+ }
+ } else {
+ if dust_outbound_balance {
+ // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
+ // Outbound dust balance: 5000 sats
+ for i in 0..dust_htlc_on_counterparty_tx {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
+ if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
+ }
+ } else {
+ // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
+ // Inbound dust balance: 5000 sats
+ for _ in 0..dust_htlc_on_counterparty_tx {
+ route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
+ }
+ }
+ }
+
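+ // Counterparty-tx exposure once one extra dust HTLC is added: 26 * 200_000 msat =
+ // 5_200_000 msat, just over the 5_000_000 msat limit.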
+ let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1);
+ if exposure_breach_event == ExposureEvent::AtHTLCForward {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
+ let config = UserConfig::default();
+ // With default dust exposure: 5000 sats
+ if on_holder_tx {
+ let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1);
+ let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat;
+ unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat)));
+ } else {
+ unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat)));
+ }
+ } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
+ nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.remove(0));
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ // With default dust exposure: 5000 sats
+ if on_holder_tx {
+ // Outbound dust balance: 6399 sats
+ let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
+ let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat), 1);
+ } else {
+ // Outbound dust balance: 5200 sats
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat), 1);
+ }
+ } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000);
+ if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at update_fee-swallowed HTLC"); }
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = *feerate_lock * 10;
+ }
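+ // 253 sat/kw (the test default) * 10 = 2530 sat/kw, the feerate in the log assertion below;
+ // at that rate the pending 2.5k-sat HTLC would be trimmed to dust, so sending update_fee
+ // would breach the exposure limit and the fee update is held back.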
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1);
+ }
+
+ let _ = nodes[0].node.get_and_clear_pending_msg_events();
+ let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
+ added_monitors.clear();
+}
+
+#[test]
+fn test_max_dust_htlc_exposure() {
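+ // Cover all twelve combinations of (dust_outbound_balance, ExposureEvent, on_holder_tx).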
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
+}