X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=00db1f5ca552bd1942807a9b0f03c5ab53d173dc;hb=6e776d9fb99b90c60ce88d951b9e9fbc2cb35a1a;hp=2ae12c39ec75f89fff610a17e9387098941c84c5;hpb=58539b8440e62dbfe54085949ab6246f4f93cba5;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2ae12c39..00db1f5c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -18,13 +18,13 @@ use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PER use chain::transaction::OutPoint; use chain::keysinterface::BaseSign; use ln::{PaymentPreimage, PaymentSecret, PaymentHash}; -use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC}; -use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA}; +use ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; +use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, PAYMENT_EXPIRY_BLOCKS }; use ln::channel::{Channel, ChannelError}; use ln::{chan_utils, onion_utils}; -use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT; -use routing::network_graph::{NetworkUpdate, RoutingFees}; -use routing::router::{Payee, Route, RouteHop, RouteHint, RouteHintHop, RouteParameters, find_route, get_route}; +use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; +use routing::network_graph::RoutingFees; +use routing::router::{PaymentParameters, Route, RouteHop, RouteHint, RouteHintHop, RouteParameters, find_route, get_route}; use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; @@ -42,9 +42,6 @@ use bitcoin::blockdata::opcodes; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; - use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::key::{PublicKey,SecretKey}; @@ -108,8 +105,6 @@ fn test_insane_channel_opens() { insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg }); - insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg }); - insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg }); @@ -119,6 +114,68 @@ fn test_insane_channel_opens() { insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg }); } +fn do_test_counterparty_no_reserve(send_from_initiator: bool) { + // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, + // but only for them. 
Because some LSPs do it with some level of trust of the clients (for a + // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often + // in normal testing, we test it explicitly here. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Have node0 initiate a channel to node1 with aforementioned parameters + let mut push_amt = 100_000_000; + let feerate_per_kw = 253; + let opt_anchors = false; + push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + + let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap(); + let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + if !send_from_initiator { + open_channel_message.channel_reserve_satoshis = 0; + open_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_message); + + // Extract the channel accept message from node1 to node0 + let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + if send_from_initiator { + accept_channel_message.channel_reserve_satoshis = 0; + accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message); + { + let mut lock; + let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id); + chan.holder_selected_channel_reserve_satoshis = 0; + chan.holder_max_htlc_value_in_flight_msat = 100_000_000; + } + + let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); + let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); + + // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s + // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). + if send_from_initiator { + send_payment(&nodes[0], &[&nodes[1]], 100_000_000 + // Note that for outbound channels we have to consider the commitment tx fee and the + // "fee spike buffer", which is currently a multiple of the total commitment tx fee as + // well as an additional HTLC. 
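// As a rough illustration (not asserted anywhere in this test): at the 253 sat/kW feerate
// set above, with the non-anchor base weight of 724 WU and 172 WU per HTLC,
// commit_tx_fee_msat(253, 2, false) = (724 + 2*172) * 253 / 1000 * 1000 = 270_000 msat,
// so if the buffer multiple is 2 (matching the 2* fee-spike factors used elsewhere in
// these tests) the subtraction below holds back roughly 540_000 msat.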
+ - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, opt_anchors)); + } else { + send_payment(&nodes[1], &[&nodes[0]], push_amt); + } +} + +#[test] +fn test_counterparty_no_reserve() { + do_test_counterparty_no_reserve(true); + do_test_counterparty_no_reserve(false); +} + #[test] fn test_async_inbound_update_fee() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -426,10 +483,54 @@ fn do_test_1_conf_open(connect_style: ConnectStyle) { let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known()); mine_transaction(&nodes[1], &tx); nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); mine_transaction(&nodes[0], &tx); - let (funding_locked, _) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); - let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked); + let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(as_msg_events.len(), 2); + let as_funding_locked = if let MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = as_msg_events[0] { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + msg.clone() + } else { panic!("Unexpected event"); }; + if let MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } = as_msg_events[1] { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + } else { panic!("Unexpected event"); } + + nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked); + let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_msg_events.len(), 1); + if let MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } = bs_msg_events[0] { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } else { panic!("Unexpected event"); } + + send_payment(&nodes[0], &[&nodes[1]], 100_000); + + // After 6 confirmations, as required by the spec, we'll send announcement_signatures and + // broadcast the channel_announcement (but not before exactly 6 confirmations). 
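// (The funding transaction was already mined once above, so the next 4 blocks only bring
// it to 5 confirmations, which should produce no messages; the single block connected
// after that is the 6th confirmation and finally triggers announcement_signatures.)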
+ connect_blocks(&nodes[0], 4); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + connect_blocks(&nodes[0], 1); + nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendAnnouncementSignatures, nodes[1].node.get_our_node_id())); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + connect_blocks(&nodes[1], 5); + let bs_announce_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_announce_events.len(), 2); + let bs_announcement_sigs = if let MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } = bs_announce_events[0] { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + msg.clone() + } else { panic!("Unexpected event"); }; + let (bs_announcement, bs_update) = if let MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } = bs_announce_events[1] { + (msg.clone(), update_msg.clone()) + } else { panic!("Unexpected event"); }; + + nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs); + let as_announce_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(as_announce_events.len(), 1); + let (announcement, as_update) = if let MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } = as_announce_events[0] { + (msg.clone(), update_msg.clone()) + } else { panic!("Unexpected event"); }; + assert_eq!(announcement, bs_announcement); for node in nodes { assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap()); @@ -584,11 +685,21 @@ fn test_update_fee_that_funder_cannot_afford() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let channel_value = 1888; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, InitFeatures::known(), InitFeatures::known()); + let channel_value = 5000; + let push_sats = 700; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000, InitFeatures::known(), InitFeatures::known()); let channel_id = chan.2; + let secp_ctx = Secp256k1::new(); + let bs_channel_reserve_sats = Channel::::get_holder_selected_channel_reserve_satoshis(channel_value); - let feerate = 260; + let opt_anchors = false; + + // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee + // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we + // calculate two different feerates here - the expected local limit as well as the expected + // remote limit. + let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(opt_anchors) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; + let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(opt_anchors)) as u32; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = feerate; @@ -601,36 +712,88 @@ fn test_update_fee_that_funder_cannot_afford() { commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false); - //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above. 
- //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve) + // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above. { let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); - //We made sure neither party's funds are below the dust limit so -2 non-HTLC txns from number of outputs - let num_htlcs = commitment_tx.output.len() - 2; - let total_fee: u64 = feerate as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000; + //We made sure neither party's funds are below the dust limit and there are no HTLCs here + assert_eq!(commitment_tx.output.len(), 2); + let total_fee: u64 = commit_tx_fee_msat(feerate, 0, opt_anchors) / 1000; let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value); actual_fee = channel_value - actual_fee; assert_eq!(total_fee, actual_fee); } - //Add 2 to the previous fee rate to the final fee increases by 1 (with no HTLCs the fee is essentially - //fee_rate*(724/1000) so the increment of 1*0.724 is rounded back down) { + // Increment the feerate by a small constant, accounting for rounding errors let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = feerate + 2; + *feerate_lock += 4; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1); + check_added_monitors!(nodes[0], 0); - let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()); + // Get the EnforcingSigner for each channel, which will be used to (1) get the keys + // needed to sign the new commitment tx and (2) sign the new commitment tx. + let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = { + let chan_lock = nodes[0].node.channel_state.lock().unwrap(); + let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let chan_signer = local_chan.get_signer(); + let pubkeys = chan_signer.pubkeys(); + (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, + pubkeys.funding_pubkey) + }; + let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = { + let chan_lock = nodes[1].node.channel_state.lock().unwrap(); + let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let chan_signer = remote_chan.get_signer(); + let pubkeys = chan_signer.pubkeys(); + (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, + chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + pubkeys.funding_pubkey) + }; + + // Assemble the set of keys we can use for signatures for our commitment_signed message. 
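// (TxCreationKeys::derive_new combines nodes[1]'s per-commitment point and basepoints,
// as the broadcaster of this commitment, with nodes[0]'s revocation and HTLC basepoints,
// as the countersignatory, to produce the per-commitment key set the transaction below
// is built from.)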
+ let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint, + &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); + + let res = { + let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); + let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let local_chan_signer = local_chan.get_signer(); + let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; + let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( + INITIAL_COMMITMENT_NUMBER - 1, + push_sats, + channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, opt_anchors) / 1000, + opt_anchors, local_funding, remote_funding, + commit_tx_keys.clone(), + non_buffer_feerate + 4, + &mut htlcs, + &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() + ); + local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan.2, + signature: res.0, + htlc_signatures: res.1 + }; + + let update_fee = msgs::UpdateFee { + channel_id: chan.2, + feerate_per_kw: non_buffer_feerate + 4, + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee); //While producing the commitment_signed response after handling a received update_fee request the //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) //Should produce and error. - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); @@ -918,7 +1081,7 @@ fn fake_network_test() { }); hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops], payee: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; + let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; let mut hops = Vec::with_capacity(3); hops.push(RouteHop { @@ -947,7 +1110,7 @@ fn fake_network_test() { }); hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops], payee: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; + let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; // Claim the rebalances... 
fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2); @@ -1233,7 +1396,7 @@ fn test_basic_channel_reserve() { let channel_reserve = chan_stat.channel_reserve_msat; // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1); + let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1, get_opt_anchors!(nodes[0], chan.2)); let max_can_send = 5000000 - channel_reserve - commit_tx_fee; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1); let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).err().unwrap(); @@ -1341,13 +1504,13 @@ fn test_fee_spike_violation_fails_htlc() { commitment_number, 95000, local_chan_balance, - false, local_funding, remote_funding, + local_chan.opt_anchors(), local_funding, remote_funding, commit_tx_keys.clone(), feerate_per_kw, &mut vec![(accepted_htlc_info, ())], &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() ); - local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap() + local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -1391,21 +1554,23 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { // sending any above-dust amount would result in a channel reserve violation. // In this test we check that we would be prevented from sending an HTLC in // this situation. - let feerate_per_kw = 253; - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let opt_anchors = false; + let mut push_amt = 100_000_000; - push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known()); // Sending exactly enough to hit the reserve amount should be accepted - let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } // However one more HTLC should be significantly over the reserve amount and fail. let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); @@ -1418,30 +1583,38 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { #[test] fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let mut chanmon_cfgs = create_chanmon_cfgs(2); - // Set the fee rate for the channel very high, to the point where the funder - // receiving 1 update_add_htlc would result in them closing the channel due - // to channel reserve violation. 
This close could also happen if the fee went - // up a more realistic amount, but many HTLCs were outstanding at the time of - // the update_add_htlc. - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000); + let opt_anchors = false; + + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known()); + + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000); // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1; let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height, &None).unwrap(); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 700_000, &Some(payment_secret), cur_height, &None).unwrap(); let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, - htlc_id: 1, - amount_msat: htlc_msat + 1, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, + amount_msat: htlc_msat, payment_hash: payment_hash, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, @@ -1462,35 +1635,77 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // Test that if we receive many dust HTLCs over an outbound channel, they don't count when // calculating our commitment transaction fee (this was previously broken). 
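// (A dust HTLC here means one whose value, less the fee of the HTLC-success/timeout
// transaction needed to claim it, would fall below the dust limit: such HTLCs get no
// output on the commitment transaction, so they add no weight and thus no fee. The
// dust_amt computed below sits exactly one msat under that threshold.)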
let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = 253; - chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; - chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) }; + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let opt_anchors = false; + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; - push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT) / 1000 * 1000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known()); let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 - + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1; + + feerate_per_kw as u64 * htlc_success_tx_weight(opt_anchors) / 1000 * 1000 - 1; // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the // commitment transaction fee. let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt); + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + // One more than the dust amt should fail, however. let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1); unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); } +#[test] +fn test_chan_init_feerate_unaffordability() { + // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to + // channel reserve and feerate requirements. + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let opt_anchors = false; + + // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single + // HTLC. 
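// (Worked out with the default 253 sat/kW test feerate and non-anchor weights, assuming
// MIN_AFFORDABLE_HTLC_COUNT is the 4 HTLCs used elsewhere in these tests: the amount
// reserved below is (724 + 4*172) * 253 / 1000 * 1000 = 357_000 msat, i.e. the 357 sat
// fee quoted in the error message asserted further down.)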
+ let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); + assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(), + APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() }); + + // During open, we don't have a "counterparty channel reserve" to check against, so that + // requirement only comes into play on the open_channel handling side. + push_amt -= Channel::::get_holder_selected_channel_reserve_satoshis(100_000) * 1000; + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap(); + let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel_msg.push_msat += 1; + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg); + + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + match msg_events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); + }, + _ => panic!("Unexpected event"), + } +} + #[test] fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { // Test that if we receive many dust HTLCs over an inbound channel, they don't count when @@ -1534,9 +1749,10 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; let chan_stat = get_channel_value_stat!(nodes[0], chan.2); let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // Add a 2* and +1 for the fee spike reserve. - let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1); + let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors); let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2; let amt_msat_1 = recv_value_1 + total_routing_fee_msat; @@ -1553,7 +1769,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]); // Attempt to trigger a channel reserve violation --> payment failure. 
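// (The "+ 1" in recv_value_2 below pushes the amount one msat past what the channel
// reserve and the two-HTLC commitment fee leave available, which is what should trigger
// the failure.)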
- let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2); + let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors); let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1; let amt_msat_2 = recv_value_2 + total_routing_fee_msat; let (route_2, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_2); @@ -1604,8 +1820,8 @@ fn test_inbound_outbound_capacity_is_not_zero() { assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000); } -fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64) -> u64 { - (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 +fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, opt_anchors: bool) -> u64 { + (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 } #[test] @@ -1640,6 +1856,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let feemsat = 239; // set above let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; let feerate = get_feerate!(nodes[0], chan_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan_1.2); let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; @@ -1661,7 +1878,7 @@ fn test_channel_reserve_holding_cell_htlcs() { // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve. // Also, ensure that each payment has enough to be over the dust limit to // ensure it'll be included in each commit tx fee calculation. - let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1); + let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors); let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000); if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat { break; @@ -1693,7 +1910,7 @@ fn test_channel_reserve_holding_cell_htlcs() { // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee // policy. 
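// (For a feel of the numbers: with the reworked helper above, at the default 253 sat/kW
// and without anchors, commit_tx_fee_msat(253, 2 + 1, false) = (724 + 3*172) * 253 / 1000
// * 1000 = 313_000 msat, which the 2* fee-spike factor doubles to 626_000 msat of
// headroom excluded from recv_value_1.)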
- let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1); + let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors); let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2; let amt_msat_1 = recv_value_1 + total_fee_msat; @@ -1718,7 +1935,7 @@ fn test_channel_reserve_holding_cell_htlcs() { } // split the rest to test holding cell - let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1); + let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors); let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs; let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2; let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; @@ -1829,11 +2046,11 @@ fn test_channel_reserve_holding_cell_htlcs() { claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21); claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22); - let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1); + let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, opt_anchors); let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3); - let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1); + let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); let stat0 = get_channel_value_stat!(nodes[0], chan_1.2); assert_eq!(stat0.value_to_self_msat, expected_value_to_self); @@ -1904,7 +2121,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent!(nodes[0], payment_preimage_1); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg); @@ -1933,7 +2150,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent!(nodes[0], payment_preimage_2); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_2); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -1946,6 +2163,7 @@ fn channel_reserve_in_flight_removes() { // resolve the second HTLC from A's point of view. 
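// (Note the event split exercised here: PaymentSent was already surfaced when the fulfill
// arrived (expect_payment_sent_without_paths above), while the per-path
// PaymentPathSuccessful event is only generated once the HTLC is irrevocably resolved,
// which is why it is expected right after this revoke_and_ack.)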
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back @@ -1975,6 +2193,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); @@ -2306,7 +2525,7 @@ fn revoked_output_claim() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx check_spends!(node_txn[0], revoked_local_txn[0]); @@ -2654,7 +2873,7 @@ fn test_htlc_on_chain_success() { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 5); let mut first_claimed = false; for event in events { match event { @@ -2667,6 +2886,7 @@ fn test_htlc_on_chain_success() { assert_eq!(payment_hash, payment_hash_2); } }, + Event::PaymentPathSuccessful { .. } => {}, Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } @@ -2986,9 +3206,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 }); + assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 }); match events[0] { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { }, _ => panic!("Unexepected event"), @@ -3001,6 +3222,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use } if !deliver_bs_raa { match events[2] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert_eq!(*payment_hash, fourth_payment_hash); + }, + _ => panic!("Unexpected event"), + } + match events[3] { Event::PendingHTLCsForwardable { .. 
} => { }, _ => panic!("Unexpected event"), }; @@ -3301,13 +3528,13 @@ fn test_dup_events_on_peer_disconnect() { check_added_monitors!(nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + expect_payment_path_successful!(nodes[0]); } #[test] @@ -3347,7 +3574,7 @@ fn test_simple_peer_disconnect() { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); { let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 3); match events[0] { Event::PaymentSent { payment_preimage, payment_hash, .. } => { assert_eq!(payment_preimage, payment_preimage_3); @@ -3362,6 +3589,10 @@ fn test_simple_peer_disconnect() { }, _ => panic!("Unexpected event"), } + match events[2] { + Event::PaymentPathSuccessful { .. } => {}, + _ => panic!("Unexpected event"), + } } claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); @@ -3551,15 +3782,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken if messages_delivered < 2 { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); if messages_delivered < 1 { - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 1); - match events_4[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => { - assert_eq!(payment_preimage_1, *payment_preimage); - assert_eq!(payment_hash_1, *payment_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); } else { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -3577,10 +3800,18 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } + if messages_delivered == 1 || messages_delivered == 2 { + expect_payment_path_successful!(nodes[0]); + } + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + if messages_delivered > 2 { + expect_payment_path_successful!(nodes[0]); + } + // Channel should still work fine... 
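// (Whichever subset of the original messages made it through before the disconnects, both
// peers should have converged on the same channel state by now, so a fresh payment over
// the same channel is expected to go through normally.)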
let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; @@ -3620,15 +3851,7 @@ fn test_funding_peer_disconnect() { confirm_transaction(&nodes[0], &tx); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); - let chan_id; - assert_eq!(events_1.len(), 1); - match events_1[0] { - MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - chan_id = msg.channel_id; - }, - _ => panic!("Unexpected event"), - } + assert!(events_1.is_empty()); reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); @@ -3637,53 +3860,106 @@ fn test_funding_peer_disconnect() { confirm_transaction(&nodes[1], &tx); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_2.len(), 2); - let funding_locked = match events_2[0] { + assert!(events_2.is_empty()); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); + let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + // nodes[0] hasn't yet received a funding_locked, so it only sends that on reconnect. + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); + let events_3 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_3.len(), 1); + let as_funding_locked = match events_3[0] { + MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + msg.clone() + }, + _ => panic!("Unexpected event {:?}", events_3[0]), + }; + + // nodes[1] received nodes[0]'s funding_locked on the first reconnect above, so it should send + // announcement_signatures as well as channel_update. + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); + let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_4.len(), 3); + let chan_id; + let bs_funding_locked = match events_4[0] { MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + chan_id = msg.channel_id; msg.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {:?}", events_4[0]), }; - let bs_announcement_sigs = match events_2[1] { + let bs_announcement_sigs = match events_4[1] { MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); msg.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {:?}", events_4[1]), }; + match events_4[2] { + MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + }, + _ => panic!("Unexpected event {:?}", events_4[2]), + } - reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + // Re-deliver nodes[0]'s funding_locked, which nodes[1] can safely ignore. 
It currently + // generates a duplicative private channel_update + nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked); + let events_5 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + }, + _ => panic!("Unexpected event {:?}", events_5[0]), + }; - nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked); - nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs); - let events_3 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_3.len(), 2); - let as_announcement_sigs = match events_3[0] { + // When we deliver nodes[1]'s funding_locked, however, nodes[0] will generate its + // announcement_signatures. + nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding_locked); + let events_6 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_6.len(), 1); + let as_announcement_sigs = match events_6[0] { MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { assert_eq!(*node_id, nodes[1].node.get_our_node_id()); msg.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {:?}", events_6[0]), }; - let (as_announcement, as_update) = match events_3[1] { + + // When we deliver nodes[1]'s announcement_signatures to nodes[0], nodes[0] should immediately + // broadcast the channel announcement globally, as well as re-send its (now-public) + // channel_update. + nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs); + let events_7 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_7.len(), 1); + let (chan_announcement, as_update) = match events_7[0] { MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { (msg.clone(), update_msg.clone()) }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {:?}", events_7[0]), }; + // Finally, deliver nodes[0]'s announcement_signatures to nodes[1] and make sure it creates the + // same channel_announcement. 
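// (Both peers build the channel_announcement from the same channel data and the same four
// signatures, so the message nodes[1] broadcasts here should be byte-for-byte identical to
// the one nodes[0] produced above; the assert_eq! on the announcement below checks that.)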
nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs); - let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_4.len(), 1); - let (_, bs_update) = match events_4[0] { + let events_8 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_8.len(), 1); + let bs_update = match events_8[0] { MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { - (msg.clone(), update_msg.clone()) + assert_eq!(*msg, chan_announcement); + update_msg.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {:?}", events_8[0]), }; - nodes[0].net_graph_msg_handler.handle_channel_announcement(&as_announcement).unwrap(); + // Provide the channel announcement and public updates to the network graph + nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).unwrap(); nodes[0].net_graph_msg_handler.handle_channel_update(&bs_update).unwrap(); nodes[0].net_graph_msg_handler.handle_channel_update(&as_update).unwrap(); @@ -3731,14 +4007,14 @@ fn test_funding_peer_disconnect() { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - // as_announcement should be re-generated exactly by broadcast_node_announcement. + // The channel announcement should be re-generated exactly by broadcast_node_announcement. nodes[0].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new()); let msgs = nodes[0].node.get_and_clear_pending_msg_events(); let mut found_announcement = false; for event in msgs.iter() { match event { MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => { - if *msg == as_announcement { found_announcement = true; } + if *msg == chan_announcement { found_announcement = true; } }, MessageSendEvent::BroadcastNodeAnnouncement { .. } => {}, _ => panic!("Unexpected event"), @@ -3892,6 +4168,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -3911,7 +4188,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. let payment_id = PaymentId([42; 32]); - nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200000, cur_height, payment_id, &None).unwrap(); + nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200000, cur_height, payment_id, &None).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4013,7 +4290,14 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { } expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false); } else { - expect_payment_failed!(nodes[1], second_payment_hash, true); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] { + assert_eq!(*payment_hash, second_payment_hash); + } else { panic!("Unexpected event"); } + if let Event::PaymentFailed { ref payment_hash, .. 
} = events[1] { + assert_eq!(*payment_hash, second_payment_hash); + } else { panic!("Unexpected event"); } } } @@ -4445,12 +4729,12 @@ fn test_claim_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); nodes[1].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening @@ -4474,7 +4758,7 @@ fn test_claim_on_remote_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); nodes[0].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -4518,7 +4802,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() { check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -4561,7 +4845,7 @@ fn test_static_spendable_outputs_preimage_tx() { } // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx assert_eq!(node_txn.len(), 3); check_spends!(node_txn[0], commitment_tx[0]); assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); @@ -4647,7 +4931,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 2); assert_eq!(node_txn[0].input.len(), 2); check_spends!(node_txn[0], revoked_local_txn[0]); @@ -4700,7 +4984,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, 
ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0] // including the one already spent by revoked_htlc_txn[1]. That's OK, we'll spend with valid @@ -4756,7 +5040,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(revoked_htlc_txn.len(), 2); assert_eq!(revoked_htlc_txn[0].input.len(), 1); @@ -4774,7 +5058,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0] @@ -4915,7 +5199,7 @@ fn test_onchain_to_onchain_claim() { let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); mine_transaction(&nodes[1], &commitment_tx[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx assert_eq!(b_txn.len(), 3); check_spends!(b_txn[1], chan_1.3); @@ -5559,24 +5843,16 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); + let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being // present in B's local commitment transaction, but none of A's commitment transactions. - assert!(nodes[1].node.claim_funds(our_payment_preimage)); + assert!(nodes[1].node.claim_funds(payment_preimage)); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { payment_preimage, payment_hash, .. 
} => { - assert_eq!(payment_preimage, our_payment_preimage); - assert_eq!(payment_hash, our_payment_hash); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent_without_paths!(nodes[0], payment_preimage); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); @@ -5677,7 +5953,14 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } else { - expect_payment_failed!(nodes[0], our_payment_hash, true); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] { + assert_eq!(*payment_hash, our_payment_hash); + } else { panic!("Unexpected event"); } + if let Event::PaymentFailed { ref payment_hash, .. } = events[1] { + assert_eq!(*payment_hash, our_payment_hash); + } else { panic!("Unexpected event"); } } } @@ -5718,7 +6001,7 @@ fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on i let nodes = create_network(2, &node_cfgs, &node_chanmgrs); //Force duplicate channel ids for node in nodes.iter() { - *node.keys_manager.override_channel_id_priv.lock().unwrap() = Some([0; 32]); + *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]); } // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer. @@ -5727,9 +6010,10 @@ fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on i nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap(); let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel); + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); //Create a second channel with a channel_id collision - assert!(nodes[0].node.create_channel(nodes[0].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err()); + assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err()); } #[test] @@ -5837,9 +6121,10 @@ fn test_fail_holding_cell_htlc_upon_free() { let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. - let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1); + let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); // Send a payment which passes reserve checks but gets stuck in the holding cell. 
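A minimal, self-contained sketch of the arithmetic behind the `2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors)` term above: the commitment fee is computed for one extra "buffer" HTLC and then doubled for the fee spike reserve. The weights and feerate below are assumptions consistent with BOLT 3 non-anchor commitments and the 253 sat/kW feerate these tests run at; this is not the crate's own helper.

    const COMMIT_TX_BASE_WEIGHT: u64 = 724;      // assumed BOLT 3 base weight (no anchors)
    const COMMIT_TX_WEIGHT_PER_HTLC: u64 = 172;  // assumed BOLT 3 per-HTLC output weight

    fn commit_tx_fee_msat_sketch(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
        // Fee in sats (rounded down to whole sats), expressed in msat.
        feerate_per_kw * (COMMIT_TX_BASE_WEIGHT + num_htlcs * COMMIT_TX_WEIGHT_PER_HTLC) / 1000 * 1000
    }

    fn main() {
        let feerate = 253;
        // One real HTLC plus one buffer HTLC, with the resulting fee doubled:
        let fee_spike_reserve_msat = 2 * commit_tx_fee_msat_sketch(feerate, 1 + 1);
        assert_eq!(fee_spike_reserve_msat, 540_000);
        println!("fee spike reserve: {} msat", fee_spike_reserve_msat);
    }

Subtracting this reserve (plus the channel reserve) from the channel value is what makes `max_can_send` the largest payment that still passes the sender-side checks.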
@@ -5916,10 +6201,11 @@ fn test_free_and_fail_holding_cell_htlcs() { let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. let amt_1 = 20000; - let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1) - amt_1; + let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors) - amt_1; let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1); let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2); @@ -5998,15 +6284,7 @@ fn test_free_and_fail_holding_cell_htlcs() { let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => { - assert_eq!(*payment_preimage, payment_preimage_1); - assert_eq!(*payment_hash, payment_hash_1); - } - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); } // Test that if we fail to forward an HTLC that is being freed from the holding cell that the @@ -6049,11 +6327,12 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan_0_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan_0_1.2); // Send a payment which passes reserve checks but gets stuck in the holding cell. let feemsat = 239; let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1) - total_routing_fee_msat; + let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors) - total_routing_fee_msat; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); let payment_event = { nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); @@ -6328,8 +6607,9 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let chan_stat = get_channel_value_stat!(nodes[0], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; let feerate = get_feerate!(nodes[0], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], chan.2); // The 2* and +1 are for the fee spike reserve. 
- let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1); + let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); @@ -6993,7 +7273,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0) { + if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, _ => panic!("Unexpected event"), @@ -7022,7 +7302,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0) { + if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. 
Actual: \d+").unwrap().is_match(err.as_str())); }, _ => panic!("Unexpected event"), @@ -7160,9 +7440,9 @@ fn test_check_htlc_underpaying() { // Create some initial channels create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let scorer = test_utils::TestScorer::with_fixed_penalty(0); - let payee = Payee::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known()); - let route = get_route(&nodes[0].node.get_our_node_id(), &payee, nodes[0].network_graph, None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap(); + let scorer = test_utils::TestScorer::with_penalty(0); + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known()); + let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, nodes[0].network_graph, None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap(); let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200).unwrap(); nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); @@ -7217,9 +7497,9 @@ fn test_announce_disable_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); // Disconnect peers nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); @@ -7229,13 +7509,13 @@ fn test_announce_disable_channels() { nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - let mut chans_disabled: HashSet = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect(); + let mut chans_disabled = HashMap::new(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set // Check that each channel gets updated exactly once - if !chans_disabled.remove(&msg.contents.short_channel_id) { + if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() { panic!("Generated ChannelUpdate for wrong chan!"); } }, @@ -7271,19 +7551,22 @@ fn test_announce_disable_channels() { nodes[0].node.timer_tick_occurred(); let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off - // Check that each channel 
gets updated exactly once - if !chans_disabled.remove(&msg.contents.short_channel_id) { - panic!("Generated ChannelUpdate for wrong chan!"); + match chans_disabled.remove(&msg.contents.short_channel_id) { + // Each update should have a higher timestamp than the previous one, replacing + // the old one. + Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp), + None => panic!("Generated ChannelUpdate for wrong chan!"), } }, _ => panic!("Unexpected event"), } } + // Check that each channel gets updated exactly once + assert!(chans_disabled.is_empty()); } #[test] @@ -7559,13 +7842,13 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known()); // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps) - let payee = Payee::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known()); - let scorer = test_utils::TestScorer::with_fixed_penalty(0); - let route = get_route(&nodes[0].node.get_our_node_id(), &payee, &nodes[0].network_graph, None, + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known()); + let scorer = test_utils::TestScorer::with_penalty(0); + let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph, None, 3_000_000, 50, nodes[0].logger, &scorer).unwrap(); let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0; - let payee = Payee::from_node_id(nodes[0].node.get_our_node_id()).with_features(InvoiceFeatures::known()); - let route = get_route(&nodes[1].node.get_our_node_id(), &payee, nodes[1].network_graph, None, + let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id()).with_features(InvoiceFeatures::known()); + let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, nodes[1].network_graph, None, 3_000_000, 50, nodes[0].logger, &scorer).unwrap(); send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000); @@ -7908,6 +8191,43 @@ fn test_bump_txn_sanitize_tracking_maps() { } } +#[test] +fn test_pending_claimed_htlc_no_balance_underflow() { + // Tests that if we have a pending outbound HTLC as well as a claimed-but-not-fully-removed + // HTLC we will not underflow when we call `Channel::get_balance_msat()`. 
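A minimal sketch of the underflow hazard this test guards against (not the crate's actual balance accounting; the figures simply echo the 1,010,000 msat HTLC and 1,000,000 msat final balance used below):

    fn main() {
        let value_to_self_msat: u64 = 1_000_000;     // hypothetical holder balance
        let pending_outbound_msat: u64 = 1_010_000;  // claimed-but-not-yet-removed HTLC still counted
        // A plain `value_to_self_msat - pending_outbound_msat` would underflow and panic in
        // debug builds; saturating arithmetic is the defensive pattern:
        let balance_msat = value_to_self_msat.saturating_sub(pending_outbound_msat);
        assert_eq!(balance_msat, 0);
    }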
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known()); + + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_010_000).0; + nodes[1].node.claim_funds(payment_preimage); + check_added_monitors!(nodes[1], 1); + let fulfill_ev = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &fulfill_ev.update_fulfill_htlcs[0]); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &fulfill_ev.commitment_signed); + check_added_monitors!(nodes[0], 1); + let (_raa, _cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + // At this point nodes[1] has received 1,010k msat (10k msat more than their reserve) and can + // send an HTLC back (though it will go in the holding cell). Send an HTLC back and check we + // can get our balance. + + // Get a route from nodes[1] to nodes[0] by getting a route going the other way and then flip + // the public key of the only hop. This works around ChannelDetails not showing the + // almost-claimed HTLC as available balance. + let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); + route.payment_params = None; // This is all wrong, but unnecessary + route.paths[0][0].pubkey = nodes[0].node.get_our_node_id(); + let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); + + assert_eq!(nodes[1].node.list_channels()[0].balance_msat, 1_000_000); +} + #[test] fn test_channel_conf_timeout() { // Tests that, for inbound channels, we give up on them if the funding transaction does not @@ -7981,6 +8301,207 @@ fn test_override_0msat_htlc_minimum() { assert_eq!(res.htlc_minimum_msat, 1); } +#[test] +fn test_manually_accept_inbound_channel_request() { + let mut manually_accept_conf = UserConfig::default(); + manually_accept_conf.manually_accept_inbound_channels = true; + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res); + + // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before + // accepting the inbound channel request. + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let events = nodes[1].node.get_and_clear_pending_events(); + match events[0] { + Event::OpenChannelRequest { temporary_channel_id, .. 
} => { + nodes[1].node.accept_inbound_channel(&temporary_channel_id).unwrap(); + } + _ => panic!("Unexpected event"), + } + + let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(accept_msg_ev.len(), 1); + + match accept_msg_ev[0] { + MessageSendEvent::SendAcceptChannel { ref node_id, .. } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_manually_reject_inbound_channel_request() { + let mut manually_accept_conf = UserConfig::default(); + manually_accept_conf.manually_accept_inbound_channels = true; + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res); + + // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before + // rejecting the inbound channel request. + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let events = nodes[1].node.get_and_clear_pending_events(); + match events[0] { + Event::OpenChannelRequest { temporary_channel_id, .. } => { + nodes[1].node.force_close_channel(&temporary_channel_id).unwrap(); + } + _ => panic!("Unexpected event"), + } + + let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(close_msg_ev.len(), 1); + + match close_msg_ev[0] { + MessageSendEvent::HandleError { ref node_id, .. } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } + _ => panic!("Unexpected event"), + } + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); +} + +#[test] +fn test_reject_funding_before_inbound_channel_accepted() { + // This tests that when `UserConfig::manually_accept_inbound_channels` is set to true, inbound + // channels must to be manually accepted through `ChannelManager::accept_inbound_channel` by + // the node operator before the counterparty sends a `FundingCreated` message. If a + // `FundingCreated` message is received before the channel is accepted, it should be rejected + // and the channel should be closed. + let mut manually_accept_conf = UserConfig::default(); + manually_accept_conf.manually_accept_inbound_channels = true; + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let temp_channel_id = res.temporary_channel_id; + + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res); + + // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`. + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Clear the `Event::OpenChannelRequest` event without responding to the request. 
+ nodes[1].node.get_and_clear_pending_events(); + + // Get the `AcceptChannel` message of `nodes[1]` without calling + // `ChannelManager::accept_inbound_channel`, which generates a + // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]` + // `handle_accept_channel`, which is required in order for `create_funding_transaction` to + // succeed when `nodes[0]` is passed to it. + { + let mut lock; + let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id); + let accept_chan_msg = channel.get_accept_channel_message(); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg); + } + + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 100000, 42); + + nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap(); + let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + + // The `funding_created_msg` should be rejected by `nodes[1]` as it hasn't accepted the channel + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + + let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(close_msg_ev.len(), 1); + + let expected_err = "FundingCreated message received before the channel was accepted"; + match close_msg_ev[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => { + assert_eq!(msg.channel_id, temp_channel_id); + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(msg.data, expected_err); + } + _ => panic!("Unexpected event"), + } + + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() }); +} + +#[test] +fn test_can_not_accept_inbound_channel_twice() { + let mut manually_accept_conf = UserConfig::default(); + manually_accept_conf.manually_accept_inbound_channels = true; + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res); + + // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before + // accepting the inbound channel request. + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let events = nodes[1].node.get_and_clear_pending_events(); + match events[0] { + Event::OpenChannelRequest { temporary_channel_id, .. } => { + nodes[1].node.accept_inbound_channel(&temporary_channel_id).unwrap(); + let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id); + match api_res { + Err(APIError::APIMisuseError { err }) => { + assert_eq!(err, "The channel isn't currently awaiting to be accepted."); + }, + Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"), + Err(_) => panic!("Unexpected Error"), + } + } + _ => panic!("Unexpected event"), + } + + // Ensure that the channel wasn't closed after attempting to accept it twice. 
+ let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(accept_msg_ev.len(), 1); + + match accept_msg_ev[0] { + MessageSendEvent::SendAcceptChannel { ref node_id, .. } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_can_not_accept_unknown_inbound_channel() { + let chanmon_cfg = create_chanmon_cfgs(1); + let node_cfg = create_node_cfgs(1, &chanmon_cfg); + let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]); + let node = create_network(1, &node_cfg, &node_chanmgr)[0].node; + + let unknown_channel_id = [0; 32]; + let api_res = node.accept_inbound_channel(&unknown_channel_id); + match api_res { + Err(APIError::ChannelUnavailable { err }) => { + assert_eq!(err, "Can't accept a channel that doesn't exist"); + }, + Ok(_) => panic!("It shouldn't be possible to accept an unkown channel"), + Err(_) => panic!("Unexpected Error"), + } +} + #[test] fn test_simple_mpp() { // Simple test of sending a multi-path payment. @@ -8018,7 +8539,7 @@ fn test_preimage_storage() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; { - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200); + let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -8046,8 +8567,10 @@ fn test_preimage_storage() { } #[test] +#[allow(deprecated)] fn test_secret_timeout() { - // Simple test of payment secret storage time outs + // Simple test of payment secret storage time outs. After + // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -8055,11 +8578,11 @@ fn test_secret_timeout() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment(Some(100_000), 2); + let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap(); // We should fail to register the same payment hash twice, at least until we've connected a // block with time 7200 + CHAN_CONFIRM_DEPTH + 1. 
- if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2) { + if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) { assert_eq!(err, "Duplicate payment hash"); } else { panic!(); } let mut block = { @@ -8074,7 +8597,7 @@ fn test_secret_timeout() { } }; connect_block(&nodes[1], &block); - if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2) { + if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) { assert_eq!(err, "Duplicate payment hash"); } else { panic!(); } @@ -8083,7 +8606,7 @@ fn test_secret_timeout() { block.header.prev_blockhash = block.header.block_hash(); block.header.time += 1; connect_block(&nodes[1], &block); - let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2).unwrap(); + let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap(); assert_ne!(payment_secret_1, our_payment_secret); { @@ -8122,7 +8645,7 @@ fn test_bad_secret_hash() { let random_payment_hash = PaymentHash([42; 32]); let random_payment_secret = PaymentSecret([43; 32]); - let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2); + let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); // All the below cases should end up being handled exactly identically, so we macro the @@ -8490,7 +9013,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain }; let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}); - let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); @@ -9079,6 +9602,77 @@ fn test_forwardable_regen() { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); } +#[test] +fn test_dup_htlc_second_fail_panic() { + // Previously, if we received two HTLCs back-to-back, where the second overran the expected + // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event. + // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed + // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed. 
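// In the fixed behavior exercised below, nodes[1] auto-fails only the overrunning second HTLC;
// the first is failed explicitly via `fail_htlc_backwards`, so nodes[0] ends up with two
// `update_fail_htlc`s and two `PaymentPathFailed` events, one per HTLC.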
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known()); + + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id()) + .with_features(InvoiceFeatures::known()); + let scorer = test_utils::TestScorer::with_penalty(0); + let route = get_route( + &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph, + Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), + 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap(); + + let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]); + + { + nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let mut payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + } + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 10_000); + + { + nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let mut payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + // At this point, nodes[1] would notice it has too much value for the payment. It will + // assume the second is a privacy attack (no longer particularly relevant + // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back + // the first HTLC delivered above. + } + + // Now we go fail back the first HTLC from the user end. + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); + nodes[1].node.fail_htlc_backwards(&our_payment_hash); + + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); + + check_added_monitors!(nodes[1], 1); + let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]); + commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false); + + let failure_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(failure_events.len(), 2); + if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); } + if let Event::PaymentPathFailed { ..
} = failure_events[1] {} else { panic!(); } +} + #[test] fn test_keysend_payments_to_public_node() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -9090,13 +9684,13 @@ fn test_keysend_payments_to_public_node() { let network_graph = nodes[0].network_graph; let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - let params = RouteParameters { - payee: Payee::for_keysend(payee_pubkey), + let route_params = RouteParameters { + payment_params: PaymentParameters::for_keysend(payee_pubkey), final_value_msat: 10000, final_cltv_expiry_delta: 40, }; - let scorer = test_utils::TestScorer::with_fixed_penalty(0); - let route = find_route(&payer_pubkey, &params, network_graph, None, nodes[0].logger, &scorer).unwrap(); + let scorer = test_utils::TestScorer::with_penalty(0); + let route = find_route(&payer_pubkey, &route_params, network_graph, None, nodes[0].logger, &scorer).unwrap(); let test_preimage = PaymentPreimage([42; 32]); let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); @@ -9122,16 +9716,16 @@ fn test_keysend_payments_to_private_node() { nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); - let params = RouteParameters { - payee: Payee::for_keysend(payee_pubkey), + let route_params = RouteParameters { + payment_params: PaymentParameters::for_keysend(payee_pubkey), final_value_msat: 10000, final_cltv_expiry_delta: 40, }; let network_graph = nodes[0].network_graph; let first_hops = nodes[0].node.list_usable_channels(); - let scorer = test_utils::TestScorer::with_fixed_penalty(0); + let scorer = test_utils::TestScorer::with_penalty(0); let route = find_route( - &payer_pubkey, &params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()), + &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()), nodes[0].logger, &scorer ).unwrap(); @@ -9145,3 +9739,177 @@ fn test_keysend_payments_to_private_node() { pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage)); claim_payment(&nodes[0], &path, test_preimage); } + +/// The possible events which may trigger a `max_dust_htlc_exposure` breach +#[derive(Clone, Copy, PartialEq)] +enum ExposureEvent { + /// Breach occurs at HTLC forwarding (see `send_htlc`) + AtHTLCForward, + /// Breach occurs at HTLC reception (see `update_add_htlc`) + AtHTLCReception, + /// Breach occurs at outbound update_fee (see `send_update_fee`) + AtUpdateFeeOutbound, +} + +fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) { + // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` + // policy. + // + // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and + // trimmed-to-dust HTLC outbound balance and this new payment as included on next + // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the + // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC + // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included + // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail + // the update.
Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel + // might be available again for HTLC processing once the dust bandwidth has cleared up. + + let chanmon_cfgs = create_chanmon_cfgs(2); + let mut config = test_default_channel_config(); + config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.max_htlc_value_in_flight_msat = 50_000_000; + open_channel.max_accepted_htlcs = 60; + if on_holder_tx { + open_channel.dust_limit_satoshis = 546; + } + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + + let opt_anchors = false; + + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42); + + if on_holder_tx { + if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) { + chan.holder_dust_limit_satoshis = 546; + } + } + + nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap(); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); + check_added_monitors!(nodes[0], 1); + + let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked); + update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + + let dust_buffer_feerate = { + let chan_lock = nodes[0].node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_dust_buffer_feerate(None) as u64 + }; + let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; + let dust_outbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; + + let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; + let dust_inbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; + + let dust_htlc_on_counterparty_tx: u64 = 25; + let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx; + + if on_holder_tx { + if dust_outbound_balance { + // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 
1000 + holder's `dust_limit_satoshis`) + // Outbound dust balance: 4372 sats + // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2132 sats + for i in 0..dust_outbound_htlc_on_holder_tx { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + } + } else { + // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`) + // Inbound dust balance: 4372 sats + // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2031 sats + for _ in 0..dust_inbound_htlc_on_holder_tx { + route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat); + } + } + } else { + if dust_outbound_balance { + // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) + // Outbound dust balance: 5000 sats + for i in 0..dust_htlc_on_counterparty_tx { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); } + } + } else { + // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) + // Inbound dust balance: 5000 sats + for _ in 0..dust_htlc_on_counterparty_tx { + route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat); + } + } + } + + let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1); + if exposure_breach_event == ExposureEvent::AtHTLCForward { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); + let mut config = UserConfig::default(); + // With default dust exposure: 5000 sats + if on_holder_tx { + let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1); + let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat; + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat))); + } else { + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat))); + } + } else if exposure_breach_event == ExposureEvent::AtHTLCReception { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); + 
nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.remove(0)); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + // With default dust exposure: 5000 sats + if on_holder_tx { + // Outbound dust balance: 6399 sats + let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1); + let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat; + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat), 1); + } else { + // Outbound dust balance: 5200 sats + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat), 1); + } + } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound { + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000); + if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at update_fee-swallowed HTLC", ); } + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = *feerate_lock * 10; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1); + } + + let _ = nodes[0].node.get_and_clear_pending_msg_events(); + let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); + added_monitors.clear(); +} + +#[test] +fn test_max_dust_htlc_exposure() { + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true); + do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false); + do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true); +}
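The holder-commitment dust thresholds quoted in the comments above (2223 sats outbound, 2324 sats inbound) follow from the formula the test re-implements: dust buffer feerate times the second-stage HTLC transaction weight, plus the holder's dust limit. A self-contained check of those figures, assuming the BOLT 3 non-anchor HTLC-timeout/HTLC-success weights (663 / 703 WU), a 2530 sat/kWU dust buffer feerate, and the 546 sat dust limit the test forces on both sides (assumed values, not the crate's constants):

    fn main() {
        let dust_buffer_feerate: u64 = 2530;    // assumed buffer feerate used for dust accounting
        let htlc_timeout_tx_weight: u64 = 663;  // BOLT 3 non-anchor HTLC-timeout weight
        let htlc_success_tx_weight: u64 = 703;  // BOLT 3 non-anchor HTLC-success weight
        let dust_limit_satoshis: u64 = 546;     // forced on both sides by the test

        // Outbound HTLCs on our own commitment are claimed via HTLC-timeout:
        let outbound_threshold = dust_buffer_feerate * htlc_timeout_tx_weight / 1000 + dust_limit_satoshis;
        assert_eq!(outbound_threshold, 2223);

        // Inbound HTLCs on our own commitment are claimed via HTLC-success:
        let inbound_threshold = dust_buffer_feerate * htlc_success_tx_weight / 1000 + dust_limit_satoshis;
        assert_eq!(inbound_threshold, 2324);

        // The test then sends HTLCs of (threshold - 1) * 1000 msat so each one is trimmed to
        // dust and counts toward max_dust_htlc_exposure_msat.
        println!("{} msat / {} msat", (outbound_threshold - 1) * 1000, (inbound_threshold - 1) * 1000);
    }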