X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=4b38cb32c27658225f8b61e61da78002b269693f;hb=1c157a2328d23079230143314819d0128ec314fd;hp=eace4c5883066794c51cd999f6357a4631ed5268;hpb=31975c5994c8daca5b69c7c3b7d5466409ae8c2d;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index eace4c58..4b38cb32 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -18,12 +18,12 @@ use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PER use chain::transaction::OutPoint; use chain::keysinterface::BaseSign; use ln::{PaymentPreimage, PaymentSecret, PaymentHash}; -use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC}; -use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA}; +use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; +use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, PAYMENT_EXPIRY_BLOCKS }; use ln::channel::{Channel, ChannelError}; use ln::{chan_utils, onion_utils}; -use ln::chan_utils::{HTLC_SUCCESS_TX_WEIGHT, HTLCOutputInCommitment}; -use routing::network_graph::{NetworkUpdate, RoutingFees}; +use ln::chan_utils::{HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, HTLCOutputInCommitment}; +use routing::network_graph::RoutingFees; use routing::router::{Payee, Route, RouteHop, RouteHint, RouteHintHop, RouteParameters, find_route, get_route}; use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; use ln::msgs; @@ -42,9 +42,6 @@ use bitcoin::blockdata::opcodes; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; - use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::key::{PublicKey,SecretKey}; @@ -108,8 +105,6 @@ fn test_insane_channel_opens() { insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg }); - insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg }); - insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg }); @@ -119,6 +114,67 @@ fn test_insane_channel_opens() { insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg }); } +fn do_test_counterparty_no_reserve(send_from_initiator: bool) { + // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, + // but only for them. Because some LSPs do it with some level of trust of the clients (for a + // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often + // in normal testing, we test it explicitly here. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Have node0 initiate a channel to node1 with aforementioned parameters + let mut push_amt = 100_000_000; + let feerate_per_kw = 253; + push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + + let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap(); + let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + if !send_from_initiator { + open_channel_message.channel_reserve_satoshis = 0; + open_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_message); + + // Extract the channel accept message from node1 to node0 + let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + if send_from_initiator { + accept_channel_message.channel_reserve_satoshis = 0; + accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message); + { + let mut lock; + let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id); + chan.holder_selected_channel_reserve_satoshis = 0; + chan.holder_max_htlc_value_in_flight_msat = 100_000_000; + } + + let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); + let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); + + // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s + // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). + if send_from_initiator { + send_payment(&nodes[0], &[&nodes[1]], 100_000_000 + // Note that for outbound channels we have to consider the commitment tx fee and the + // "fee spike buffer", which is currently a multiple of the total commitment tx fee as + // well as an additional HTLC. 
+ - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2)); + } else { + send_payment(&nodes[1], &[&nodes[0]], push_amt); + } +} + +#[test] +fn test_counterparty_no_reserve() { + do_test_counterparty_no_reserve(true); + do_test_counterparty_no_reserve(false); +} + #[test] fn test_async_inbound_update_fee() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -584,12 +640,19 @@ fn test_update_fee_that_funder_cannot_afford() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let channel_value = 1977; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, InitFeatures::known(), InitFeatures::known()); + let channel_value = 5000; + let push_sats = 700; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000, InitFeatures::known(), InitFeatures::known()); let channel_id = chan.2; let secp_ctx = Secp256k1::new(); - - let feerate = 260; + let bs_channel_reserve_sats = Channel::::get_holder_selected_channel_reserve_satoshis(channel_value); + + // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee + // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we + // calculate two different feerates here - the expected local limit as well as the expected + // remote limit. + let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (COMMITMENT_TX_BASE_WEIGHT + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; + let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / COMMITMENT_TX_BASE_WEIGHT) as u32; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = feerate; @@ -602,27 +665,25 @@ fn test_update_fee_that_funder_cannot_afford() { commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false); - //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above. - //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve) + // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above. 
{ let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); - //We made sure neither party's funds are below the dust limit so -2 non-HTLC txns from number of outputs - let num_htlcs = commitment_tx.output.len() - 2; - let total_fee: u64 = feerate as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000; + //We made sure neither party's funds are below the dust limit and there are no HTLCs here + assert_eq!(commitment_tx.output.len(), 2); + let total_fee: u64 = commit_tx_fee_msat(feerate, 0) / 1000; let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value); actual_fee = channel_value - actual_fee; assert_eq!(total_fee, actual_fee); } - //Add 2 to the previous fee rate to the final fee increases by 1 (with no HTLCs the fee is essentially - //fee_rate*(724/1000) so the increment of 1*0.724 is rounded back down) { + // Increment the feerate by a small constant, accounting for rounding errors let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = feerate + 2; + *feerate_lock += 4; } nodes[0].node.timer_tick_occurred(); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 2), 1); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1); check_added_monitors!(nodes[0], 0); const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; @@ -658,11 +719,11 @@ fn test_update_fee_that_funder_cannot_afford() { let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( INITIAL_COMMITMENT_NUMBER - 1, - 700, - 999, + push_sats, + channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0) / 1000, false, local_funding, remote_funding, commit_tx_keys.clone(), - feerate + 124, + non_buffer_feerate + 4, &mut htlcs, &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() ); @@ -677,7 +738,7 @@ fn test_update_fee_that_funder_cannot_afford() { let update_fee = msgs::UpdateFee { channel_id: chan.2, - feerate_per_kw: feerate + 124, + feerate_per_kw: non_buffer_feerate + 4, }; nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee); @@ -1207,7 +1268,7 @@ fn test_duplicate_htlc_different_direction_onchain() { let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000); let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000); - let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap(); + let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap(); send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret); // Provide preimage to node 0 by claiming payment @@ -1446,21 +1507,21 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { // sending any above-dust amount would result in a channel reserve violation. // In this test we check that we would be prevented from sending an HTLC in // this situation. 
- let feerate_per_kw = 253; - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let mut push_amt = 100_000_000; - push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64); push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known()); // Sending exactly enough to hit the reserve amount should be accepted - let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } // However one more HTLC should be significantly over the reserve amount and fail. let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); @@ -1473,30 +1534,36 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { #[test] fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let mut chanmon_cfgs = create_chanmon_cfgs(2); - // Set the fee rate for the channel very high, to the point where the funder - // receiving 1 update_add_htlc would result in them closing the channel due - // to channel reserve violation. This close could also happen if the fee went - // up a more realistic amount, but many HTLCs were outstanding at the time of - // the update_add_htlc. - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000); + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). 
+ let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64); + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known()); + + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000); // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1; let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height, &None).unwrap(); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 700_000, &Some(payment_secret), cur_height, &None).unwrap(); let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, - htlc_id: 1, - amount_msat: htlc_msat + 1, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, + amount_msat: htlc_msat, payment_hash: payment_hash, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, @@ -1517,9 +1584,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // Test that if we receive many dust HTLCs over an outbound channel, they don't count when // calculating our commitment transaction fee (this was previously broken). let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = 253; - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); @@ -1529,7 +1594,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; - push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT) / 1000 * 1000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64); push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known()); @@ -1540,12 +1605,52 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // commitment transaction fee. let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt); + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + // One more than the dust amt should fail, however. 
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1); unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); } +#[test] +fn test_chan_init_feerate_unaffordability() { + // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to + // channel reserve and feerate requirements. + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single + // HTLC. + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64); + assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(), + APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() }); + + // During open, we don't have a "counterparty channel reserve" to check against, so that + // requirement only comes into play on the open_channel handling side. + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap(); + let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel_msg.push_msat += 1; + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg); + + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + match msg_events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); + }, + _ => panic!("Unexpected event"), + } +} + #[test] fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { // Test that if we receive many dust HTLCs over an inbound channel, they don't count when @@ -1959,7 +2064,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent!(nodes[0], payment_preimage_1); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg); @@ -1988,7 +2093,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent!(nodes[0], payment_preimage_2); + 
expect_payment_sent_without_paths!(nodes[0], payment_preimage_2); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -2001,6 +2106,7 @@ fn channel_reserve_in_flight_removes() { // resolve the second HTLC from A's point of view. nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back @@ -2030,6 +2136,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); @@ -2709,7 +2816,7 @@ fn test_htlc_on_chain_success() { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 5); let mut first_claimed = false; for event in events { match event { @@ -2722,6 +2829,7 @@ fn test_htlc_on_chain_success() { assert_eq!(payment_hash, payment_hash_2); } }, + Event::PaymentPathSuccessful { .. } => {}, Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } @@ -3041,9 +3149,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 }); + assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 }); match events[0] { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { }, _ => panic!("Unexepected event"), @@ -3056,6 +3165,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use } if !deliver_bs_raa { match events[2] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert_eq!(*payment_hash, fourth_payment_hash); + }, + _ => panic!("Unexpected event"), + } + match events[3] { Event::PendingHTLCsForwardable { .. 
} => { }, _ => panic!("Unexpected event"), }; @@ -3356,13 +3471,13 @@ fn test_dup_events_on_peer_disconnect() { check_added_monitors!(nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + expect_payment_path_successful!(nodes[0]); } #[test] @@ -3402,7 +3517,7 @@ fn test_simple_peer_disconnect() { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); { let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 3); match events[0] { Event::PaymentSent { payment_preimage, payment_hash, .. } => { assert_eq!(payment_preimage, payment_preimage_3); @@ -3417,6 +3532,10 @@ fn test_simple_peer_disconnect() { }, _ => panic!("Unexpected event"), } + match events[2] { + Event::PaymentPathSuccessful { .. } => {}, + _ => panic!("Unexpected event"), + } } claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); @@ -3606,15 +3725,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken if messages_delivered < 2 { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); if messages_delivered < 1 { - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 1); - match events_4[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => { - assert_eq!(payment_preimage_1, *payment_preimage); - assert_eq!(payment_hash_1, *payment_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); } else { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -3632,10 +3743,18 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } + if messages_delivered == 1 || messages_delivered == 2 { + expect_payment_path_successful!(nodes[0]); + } + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + if messages_delivered > 2 { + expect_payment_path_successful!(nodes[0]); + } + // Channel should still work fine... 
let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; @@ -3947,6 +4066,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -4068,7 +4188,14 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { } expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false); } else { - expect_payment_failed!(nodes[1], second_payment_hash, true); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] { + assert_eq!(*payment_hash, second_payment_hash); + } else { panic!("Unexpected event"); } + if let Event::PaymentFailed { ref payment_hash, .. } = events[1] { + assert_eq!(*payment_hash, second_payment_hash); + } else { panic!("Unexpected event"); } } } @@ -4500,7 +4627,7 @@ fn test_claim_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); nodes[1].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); @@ -4529,7 +4656,7 @@ fn test_claim_on_remote_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); nodes[0].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -5012,7 +5139,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000); - let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, 0).unwrap(); + let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200).unwrap(); // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte // script push size limit so that the below script length checks match // ACCEPTED_HTLC_SCRIPT_WEIGHT. 
@@ -5215,30 +5342,30 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); // 2nd HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee + send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee // 3rd HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee // 4th HTLC: let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); // 5th HTLC: let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); // 6th HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, 0).unwrap()); + send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200).unwrap()); // 7th HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, 0).unwrap()); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200).unwrap()); // 8th HTLC: let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); // 9th HTLC: let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee // 10th HTLC: let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 11th HTLC: let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); - 
send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, 0).unwrap()); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200).unwrap()); // Double-check that six of the new HTLC were added // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie, @@ -5614,24 +5741,16 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); + let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being // present in B's local commitment transaction, but none of A's commitment transactions. - assert!(nodes[1].node.claim_funds(our_payment_preimage)); + assert!(nodes[1].node.claim_funds(payment_preimage)); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { payment_preimage, payment_hash, .. } => { - assert_eq!(payment_preimage, our_payment_preimage); - assert_eq!(payment_hash, our_payment_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent_without_paths!(nodes[0], payment_preimage); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); @@ -5732,7 +5851,14 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } else { - expect_payment_failed!(nodes[0], our_payment_hash, true); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] { + assert_eq!(*payment_hash, our_payment_hash); + } else { panic!("Unexpected event"); } + if let Event::PaymentFailed { ref payment_hash, .. } = events[1] { + assert_eq!(*payment_hash, our_payment_hash); + } else { panic!("Unexpected event"); } } } @@ -6053,15 +6179,7 @@ fn test_free_and_fail_holding_cell_htlcs() { let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash, .. 
} => { - assert_eq!(*payment_preimage, payment_preimage_1); - assert_eq!(*payment_hash, payment_hash_1); - } - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); } // Test that if we fail to forward an HTLC that is being freed from the holding cell that the @@ -7048,7 +7166,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0) { + if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, _ => panic!("Unexpected event"), @@ -7077,7 +7195,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0) { + if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. 
Actual: \d+").unwrap().is_match(err.as_str())); }, _ => panic!("Unexpected event"), @@ -7219,7 +7337,7 @@ fn test_check_htlc_underpaying() { let payee = Payee::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known()); let route = get_route(&nodes[0].node.get_our_node_id(), &payee, nodes[0].network_graph, None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap(); let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); - let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, 0).unwrap(); + let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -7272,9 +7390,9 @@ fn test_announce_disable_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); // Disconnect peers nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); @@ -7284,13 +7402,13 @@ fn test_announce_disable_channels() { nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - let mut chans_disabled: HashSet = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect(); + let mut chans_disabled = HashMap::new(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set // Check that each channel gets updated exactly once - if !chans_disabled.remove(&msg.contents.short_channel_id) { + if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() { panic!("Generated ChannelUpdate for wrong chan!"); } }, @@ -7326,19 +7444,22 @@ fn test_announce_disable_channels() { nodes[0].node.timer_tick_occurred(); let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off - // Check that each channel gets updated exactly once - if !chans_disabled.remove(&msg.contents.short_channel_id) { - panic!("Generated ChannelUpdate for wrong chan!"); + match chans_disabled.remove(&msg.contents.short_channel_id) { + // Each update should have a higher timestamp than the previous one, replacing + // the old one. 
+ Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp), + None => panic!("Generated ChannelUpdate for wrong chan!"), } }, _ => panic!("Unexpected event"), } } + // Check that each channel gets updated exactly once + assert!(chans_disabled.is_empty()); } #[test] @@ -8073,7 +8194,7 @@ fn test_preimage_storage() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; { - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, 42); + let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -8090,8 +8211,7 @@ fn test_preimage_storage() { match events[0] { Event::PaymentReceived { ref purpose, .. } => { match &purpose { - PaymentPurpose::InvoicePayment { payment_preimage, user_payment_id, .. } => { - assert_eq!(*user_payment_id, 42); + PaymentPurpose::InvoicePayment { payment_preimage, .. } => { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap()); }, _ => panic!("expected PaymentPurpose::InvoicePayment") @@ -8102,8 +8222,10 @@ fn test_preimage_storage() { } #[test] +#[allow(deprecated)] fn test_secret_timeout() { - // Simple test of payment secret storage time outs + // Simple test of payment secret storage time outs. After + // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -8111,11 +8233,11 @@ fn test_secret_timeout() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0); + let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap(); // We should fail to register the same payment hash twice, at least until we've connected a // block with time 7200 + CHAN_CONFIRM_DEPTH + 1. - if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) { + if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) { assert_eq!(err, "Duplicate payment hash"); } else { panic!(); } let mut block = { @@ -8130,16 +8252,16 @@ fn test_secret_timeout() { } }; connect_block(&nodes[1], &block); - if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) { + if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) { assert_eq!(err, "Duplicate payment hash"); } else { panic!(); } // If we then connect the second block, we should be able to register the same payment hash - // again with a different user_payment_id (this time getting a new payment secret). + // again (this time getting a new payment secret). 
block.header.prev_blockhash = block.header.block_hash(); block.header.time += 1; connect_block(&nodes[1], &block); - let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 42).unwrap(); + let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap(); assert_ne!(payment_secret_1, our_payment_secret); { @@ -8157,9 +8279,8 @@ fn test_secret_timeout() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, user_payment_id }, .. } => { + Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => { assert!(payment_preimage.is_none()); - assert_eq!(user_payment_id, 42); assert_eq!(payment_secret, our_payment_secret); // We don't actually have the payment preimage with which to claim this payment! }, @@ -8179,7 +8300,7 @@ fn test_bad_secret_hash() { let random_payment_hash = PaymentHash([42; 32]); let random_payment_secret = PaymentSecret([43; 32]); - let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0); + let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); // All the below cases should end up being handled exactly identically, so we macro the @@ -9202,3 +9323,175 @@ fn test_keysend_payments_to_private_node() { pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage)); claim_payment(&nodes[0], &path, test_preimage); } + +/// The possible events which may trigger a `max_dust_htlc_exposure` breach +#[derive(Clone, Copy, PartialEq)] +enum ExposureEvent { + /// Breach occurs at HTLC forwarding (see `send_htlc`) + AtHTLCForward, + /// Breach occurs at HTLC reception (see `update_add_htlc`) + AtHTLCReception, + /// Breach occurs at outbound update_fee (see `send_update_fee`) + AtUpdateFeeOutbound, +} + +fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) { + // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` + // policy. + // + // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and + // trimmed-to-dust HTLC outbound balance and this new payment as included on next + // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the + // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC + // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included + // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail + // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel + // might be available again for HTLC processing once the dust bandwidth has cleared up. 
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let mut config = test_default_channel_config(); + config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.max_htlc_value_in_flight_msat = 50_000_000; + open_channel.max_accepted_htlcs = 60; + if on_holder_tx { + open_channel.dust_limit_satoshis = 546; + } + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42); + + if on_holder_tx { + if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) { + chan.holder_dust_limit_satoshis = 546; + } + } + + nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap(); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); + check_added_monitors!(nodes[0], 1); + + let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked); + update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + + let dust_buffer_feerate = { + let chan_lock = nodes[0].node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_dust_buffer_feerate(None) as u64 + }; + let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * HTLC_TIMEOUT_TX_WEIGHT / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; + let dust_outbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; + + let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * HTLC_SUCCESS_TX_WEIGHT / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; + let dust_inbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; + + let dust_htlc_on_counterparty_tx: u64 = 25; + let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx; + + if on_holder_tx { + if dust_outbound_balance { + // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`) + // Outbound dust balance: 4372 sats + // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2132 sats + for i in 0..dust_outbound_htlc_on_holder_tx 
{ + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + } + } else { + // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`) + // Inbound dust balance: 4372 sats + // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2031 sats + for _ in 0..dust_inbound_htlc_on_holder_tx { + route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat); + } + } + } else { + if dust_outbound_balance { + // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) + // Outbound dust balance: 5000 sats + for i in 0..dust_htlc_on_counterparty_tx { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + } + } else { + // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) + // Inbound dust balance: 5000 sats + for _ in 0..dust_htlc_on_counterparty_tx { + route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat); + } + } + } + + let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1); + if exposure_breach_event == ExposureEvent::AtHTLCForward { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); + let mut config = UserConfig::default(); + // With default dust exposure: 5000 sats + if on_holder_tx { + let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1); + let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat; + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat))); + } else { + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat))); + } + } else if exposure_breach_event == ExposureEvent::AtHTLCReception { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); + nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = 
SendEvent::from_event(events.remove(0)); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + // With default dust exposure: 5000 sats + if on_holder_tx { + // Outbound dust balance: 6399 sats + let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1); + let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat; + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat), 1); + } else { + // Outbound dust balance: 5200 sats + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat), 1); + } + } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at update_fee-swallowed HTLC", ); } + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = *feerate_lock * 10; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1); + } + + let _ = nodes[0].node.get_and_clear_pending_msg_events(); + let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); + added_monitors.clear(); +} + +#[test] +fn test_max_dust_htlc_exposure() { + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true); +}
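The new and rewritten tests above consistently replace inline fee arithmetic with the commit_tx_fee_msat(feerate, num_htlcs) helper. The helper's definition is not shown in this diff; the sketch below only spells out the arithmetic implied by the expressions the diff removes (feerate * (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, in whole satoshis, then expressed in msat) and by the error string asserted in test_chan_init_feerate_unaffordability. The 724/172 weights and a 4-HTLC affordability buffer are taken as assumptions consistent with those figures rather than quoted from the library.

// Standalone sketch, not part of the diff: the commitment-fee arithmetic the tests rely on.
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;      // BOLT 3 base commitment weight (cf. the removed "724/1000" comment)
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;  // BOLT 3 weight added per non-dust HTLC output

fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: u64) -> u64 {
    // Fee in whole satoshis, then expressed in msat, matching the "/ 1000 * 1000"
    // rounding of the inline expressions this diff replaces.
    feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000
}

fn main() {
    // At the default test feerate of 253 sat/kW with a 4-HTLC affordability buffer, the
    // initial commitment fee comes out to 357 sat.
    assert_eq!(commit_tx_fee_msat(253, 4), 357_000);
    // test_chan_init_feerate_unaffordability pushes one msat more than this allows, leaving
    // the funder with 356 sat against a 357 sat fee, matching the asserted error
    // "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357."
    let push_amt = 100_000_000 - commit_tx_fee_msat(253, 4);
    let funder_balance_msat = 100_000_000 - (push_amt + 1);
    assert_eq!(funder_balance_msat / 1000, 356);
}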
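do_test_max_dust_htlc_exposure builds its per-HTLC amounts from dust thresholds of the form dust_buffer_feerate * HTLC_{TIMEOUT,SUCCESS}_TX_WEIGHT / 1000 + dust_limit_satoshis. The worked check below reproduces the holder-commitment thresholds quoted in the test's comments (2223 and 2324 sat). The 663/703 second-stage weights are the BOLT 3 values behind the imported constants, the 546 sat dust limit is the one the test sets, and the 2530 sat/kW buffer feerate is inferred from those quoted figures rather than read out of this diff.

fn main() {
    // Inputs: the dust limit the test sets and the buffer feerate implied by its comments.
    let dust_buffer_feerate: u64 = 2_530;    // inferred, not shown in the diff
    let holder_dust_limit_sat: u64 = 546;    // open_channel.dust_limit_satoshis in the test
    const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663; // BOLT 3 HTLC-timeout transaction weight
    const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; // BOLT 3 HTLC-success transaction weight

    // Outbound HTLCs on the holder commitment are dust below the timeout-tx threshold...
    let outbound_threshold = dust_buffer_feerate * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder_dust_limit_sat;
    assert_eq!(outbound_threshold, 2223); // "Outbound dust threshold: 2223 sats"

    // ...while inbound HTLCs use the heavier success-tx weight.
    let inbound_threshold = dust_buffer_feerate * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder_dust_limit_sat;
    assert_eq!(inbound_threshold, 2324); // "Inbound dust threshold: 2324 sats"

    // The test then sends max_dust_htlc_exposure_msat / ((threshold - 1) * 1000) HTLCs of
    // (threshold - 1) sats each, and shows one more dust HTLC breaches the 5_000_000 msat cap.
    let exposure_cap_msat: u64 = 5_000_000;
    assert_eq!(exposure_cap_msat / ((outbound_threshold - 1) * 1000), 2);
}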
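test_update_fee_that_funder_cannot_afford now derives the funder's maximum affordable feerate from its spendable balance instead of hard-coding 260 sat/kW. Below is a sketch of that derivation under the diff's parameters (5000 sat channel, 700 sat pushed). The 1000 sat holder-selected reserve is the floor suggested by the removed comment and is assumed to still apply at 5000 sat, and the numeric value of CONCURRENT_INBOUND_HTLC_FEE_BUFFER is not shown in this diff, so both are assumptions here rather than the library's exact figures.

// Standalone sketch of the affordable-feerate calculation; the reserve and HTLC-buffer
// values are assumptions (see the note above), not values quoted from the library.
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

fn max_affordable_feerate(channel_value_sat: u64, reserve_sat: u64, push_sat: u64, htlc_buffer: u64) -> u32 {
    // Everything the funder can spend (in msat) divided by the weight of a commitment
    // transaction that already budgets for `htlc_buffer` concurrent inbound HTLCs.
    ((channel_value_sat - reserve_sat - push_sat) * 1000
        / (COMMITMENT_TX_BASE_WEIGHT + htlc_buffer * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32
}

fn main() {
    // With no HTLC buffer the same balance stretches further; this is the test's
    // `non_buffer_feerate`, used to build the commitment B must reject once A later
    // claims a feerate it cannot actually afford.
    let non_buffer_feerate = max_affordable_feerate(5_000, 1_000, 700, 0);
    assert_eq!(non_buffer_feerate, 4_558); // (5000 - 1000 - 700) * 1000 / 724
    // Any positive HTLC buffer lowers the announced limit; the 2 here is purely
    // illustrative, not the constant's actual value.
    assert!(max_affordable_feerate(5_000, 1_000, 700, 2) < non_buffer_feerate);
}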