use chain::channelmonitor;
use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use chain::transaction::OutPoint;
-use chain::keysinterface::BaseSign;
+use chain::keysinterface::{BaseSign, KeysInterface};
use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
+use ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, PAYMENT_EXPIRY_BLOCKS };
use ln::channel::{Channel, ChannelError};
use ln::{chan_utils, onion_utils};
-use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
-use routing::network_graph::{NetworkUpdate, RoutingFees};
-use routing::router::{Payee, Route, RouteHop, RouteHint, RouteHintHop, RouteParameters, find_route, get_route};
-use routing::scorer::Scorer;
+use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
+use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
use util::enforcing_trait_impls::EnforcingSigner;
use util::{byte_utils, test_utils};
use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
-use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::hashes::Hash;
-
use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey,SecretKey};
use regex;
#[test]
fn test_insane_channel_opens() {
// Stand up a network of 2 nodes
+ use ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+ let mut cfg = UserConfig::default();
+ cfg.peer_channel_config_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Instantiate channel parameters where we push the maximum msats given our
} else { assert!(false); }
};
- use ln::channel::MAX_FUNDING_SATOSHIS;
use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
// Test all mutations that would make the channel open message insane
- insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
+ insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
+ insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
- insane_open_helper(r"push_msat \d+ was larger than funding value \d+", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+ insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
- insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
-
insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
}
+#[test]
+fn test_funding_exceeds_no_wumbo_limit() {
+ // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
+ // them.
+ use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ node_cfgs[1].features = InitFeatures::known().clear_wumbo();
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
+ Err(APIError::APIMisuseError { err }) => {
+ assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
+ },
+ _ => panic!()
+ }
+}
+
+fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
+ // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
+ // but only for them. Because some LSPs do it with some level of trust of the clients (for a
+ // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
+ // in normal testing, we test it explicitly here.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Have node0 initiate a channel to node1 with aforementioned parameters
+ let mut push_amt = 100_000_000;
+ let feerate_per_kw = 253;
+ let opt_anchors = false;
+ push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+
+ let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
+ let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ if !send_from_initiator {
+ open_channel_message.channel_reserve_satoshis = 0;
+ open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+ }
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_message);
+
+ // Extract the channel accept message from node1 to node0
+ let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ if send_from_initiator {
+ accept_channel_message.channel_reserve_satoshis = 0;
+ accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+ }
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message);
+ {
+ let mut lock;
+ let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id);
+ chan.holder_selected_channel_reserve_satoshis = 0;
+ chan.holder_max_htlc_value_in_flight_msat = 100_000_000;
+ }
+
+ let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
+ let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
+ create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);
+
+ // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
+ // security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
+ if send_from_initiator {
+ send_payment(&nodes[0], &[&nodes[1]], 100_000_000
+ // Note that for outbound channels we have to consider the commitment tx fee and the
+ // "fee spike buffer", which is currently a multiple of the total commitment tx fee as
+ // well as an additional HTLC.
+ - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, opt_anchors));
+ } else {
+ send_payment(&nodes[1], &[&nodes[0]], push_amt);
+ }
+}
+
+#[test]
+fn test_counterparty_no_reserve() {
+ do_test_counterparty_no_reserve(true);
+ do_test_counterparty_no_reserve(false);
+}
+
#[test]
fn test_async_inbound_update_fee() {
let chanmon_cfgs = create_chanmon_cfgs(2);
check_added_monitors!(nodes[1], 1);
}
-fn do_test_1_conf_open(connect_style: ConnectStyle) {
- // Previously, if the minium_depth config was set to 1, we'd never send a funding_locked. This
- // tests that we properly send one in that case.
- let mut alice_config = UserConfig::default();
- alice_config.own_channel_config.minimum_depth = 1;
- alice_config.channel_options.announced_channel = true;
- alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
- let mut bob_config = UserConfig::default();
- bob_config.own_channel_config.minimum_depth = 1;
- bob_config.channel_options.announced_channel = true;
- bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
- let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- *nodes[0].connect_style.borrow_mut() = connect_style;
-
- let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
- mine_transaction(&nodes[1], &tx);
- nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
-
- mine_transaction(&nodes[0], &tx);
- let (funding_locked, _) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
- let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
-
- for node in nodes {
- assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
- node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
- node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
- }
-}
-#[test]
-fn test_1_conf_open() {
- do_test_1_conf_open(ConnectStyle::BestBlockFirst);
- do_test_1_conf_open(ConnectStyle::TransactionsFirst);
- do_test_1_conf_open(ConnectStyle::FullBlockViaListen);
-}
-
fn do_test_sanity_on_in_flight_opens(steps: u8) {
// Previously, we had issues deserializing channels when we hadn't connected the first block
// after creation. To catch that and similar issues, we lean on the Node::drop impl to test
if steps & 0x0f == 2 { return; }
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
- let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
if steps & 0x0f == 3 { return; }
nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel_value = 1888;
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, InitFeatures::known(), InitFeatures::known());
+ let channel_value = 5000;
+ let push_sats = 700;
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000, InitFeatures::known(), InitFeatures::known());
let channel_id = chan.2;
+ let secp_ctx = Secp256k1::new();
+ let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value);
- let feerate = 260;
+ let opt_anchors = false;
+
+	// Calculate the maximum feerate that A can afford. Note that, before sending an update_fee,
+	// the funder must be able to afford the new feerate with CONCURRENT_INBOUND_HTLC_FEE_BUFFER
+	// additional HTLCs on top of the commitment, while the remote peer only checks the commitment
+	// itself, so we calculate two different feerates here - the expected local limit as well as
+	// the expected remote limit.
+ let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(opt_anchors) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
+ let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(opt_anchors)) as u32;
{
let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
*feerate_lock = feerate;
commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
- //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above.
- //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve)
+ // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
{
let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
- //We made sure neither party's funds are below the dust limit so -2 non-HTLC txns from number of outputs
- let num_htlcs = commitment_tx.output.len() - 2;
- let total_fee: u64 = feerate as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
+ //We made sure neither party's funds are below the dust limit and there are no HTLCs here
+ assert_eq!(commitment_tx.output.len(), 2);
+ let total_fee: u64 = commit_tx_fee_msat(feerate, 0, opt_anchors) / 1000;
let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
actual_fee = channel_value - actual_fee;
assert_eq!(total_fee, actual_fee);
}
- //Add 2 to the previous fee rate to the final fee increases by 1 (with no HTLCs the fee is essentially
- //fee_rate*(724/1000) so the increment of 1*0.724 is rounded back down)
{
+ // Increment the feerate by a small constant, accounting for rounding errors
let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
- *feerate_lock = feerate + 2;
+ *feerate_lock += 4;
}
nodes[0].node.timer_tick_occurred();
- check_added_monitors!(nodes[0], 1);
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+ check_added_monitors!(nodes[0], 0);
+
+ const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
+
+ // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+ // needed to sign the new commitment tx and (2) sign the new commitment tx.
+ let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
+ let chan_lock = nodes[0].node.channel_state.lock().unwrap();
+ let local_chan = chan_lock.by_id.get(&chan.2).unwrap();
+ let chan_signer = local_chan.get_signer();
+ let pubkeys = chan_signer.pubkeys();
+ (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
+ pubkeys.funding_pubkey)
+ };
+ let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
+ let chan_lock = nodes[1].node.channel_state.lock().unwrap();
+ let remote_chan = chan_lock.by_id.get(&chan.2).unwrap();
+ let chan_signer = remote_chan.get_signer();
+ let pubkeys = chan_signer.pubkeys();
+ (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
+ chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
+ pubkeys.funding_pubkey)
+ };
+
+ // Assemble the set of keys we can use for signatures for our commitment_signed message.
+ let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
+ &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
+
+ let res = {
+ let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
+ let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap();
+ let local_chan_signer = local_chan.get_signer();
+ let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
+ let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
+ INITIAL_COMMITMENT_NUMBER - 1,
+ push_sats,
+ channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, opt_anchors) / 1000,
+ opt_anchors, local_funding, remote_funding,
+ commit_tx_keys.clone(),
+ non_buffer_feerate + 4,
+ &mut htlcs,
+ &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+ );
+ local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+ };
+
+ let commit_signed_msg = msgs::CommitmentSigned {
+ channel_id: chan.2,
+ signature: res.0,
+ htlc_signatures: res.1
+ };
- let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ let update_fee = msgs::UpdateFee {
+ channel_id: chan.2,
+ feerate_per_kw: non_buffer_feerate + 4,
+ };
- nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap());
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);
//While producing the commitment_signed response after handling a received update_fee request the
//check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
//Should produce and error.
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
});
hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
- let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops], payee: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
+ let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
let mut hops = Vec::with_capacity(3);
hops.push(RouteHop {
});
hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
- let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops], payee: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
+ let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
// Claim the rebalances...
fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
- let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
+ let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
// Provide preimage to node 0 by claiming payment
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[1].node.get_our_node_id());
- assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
+ assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
},
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert!(update_add_htlcs.is_empty());
let channel_reserve = chan_stat.channel_reserve_msat;
// The 2* and +1 are for the fee spike reserve.
- let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1);
+ let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1, get_opt_anchors!(nodes[0], chan.2));
let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1);
let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).err().unwrap();
commitment_number,
95000,
local_chan_balance,
- false, local_funding, remote_funding,
+ local_chan.opt_anchors(), local_funding, remote_funding,
commit_tx_keys.clone(),
feerate_per_kw,
&mut vec![(accepted_htlc_info, ())],
&local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
);
- local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap()
+ local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
};
let commit_signed_msg = msgs::CommitmentSigned {
// sending any above-dust amount would result in a channel reserve violation.
// In this test we check that we would be prevented from sending an HTLC in
// this situation.
- let feerate_per_kw = 253;
- chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
- chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
+ let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let opt_anchors = false;
+
let mut push_amt = 100_000_000;
- push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
// Sending exactly enough to hit the reserve amount should be accepted
- let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
+ let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ }
// However one more HTLC should be significantly over the reserve amount and fail.
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
#[test]
fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
let mut chanmon_cfgs = create_chanmon_cfgs(2);
- // Set the fee rate for the channel very high, to the point where the funder
- // receiving 1 update_add_htlc would result in them closing the channel due
- // to channel reserve violation. This close could also happen if the fee went
- // up a more realistic amount, but many HTLCs were outstanding at the time of
- // the update_add_htlc.
- chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
- chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
+ let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
- let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
+ let opt_anchors = false;
+
+	// Set nodes[0]'s balance such that, once the required MIN_AFFORDABLE_HTLC_COUNT HTLC fee
+	// buffer has been used up, they will consider any above-dust received HTLC to be a channel
+	// reserve violation (their balance is then the channel reserve plus the commitment
+	// transaction fee for the buffered HTLCs).
+ let mut push_amt = 100_000_000;
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
+
+	// Send MIN_AFFORDABLE_HTLC_COUNT HTLCs to cover the initial push_msat buffer we're required to include
+ for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
+ let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ }
+
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000);
// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height, &None).unwrap();
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 700_000, &Some(payment_secret), cur_height, &None).unwrap();
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
let msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
- htlc_id: 1,
- amount_msat: htlc_msat + 1,
+ htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
+ amount_msat: htlc_msat,
payment_hash: payment_hash,
cltv_expiry: htlc_cltv,
onion_routing_packet: onion_packet,
// Test that if we receive many dust HTLCs over an outbound channel, they don't count when
// calculating our commitment transaction fee (this was previously broken).
let mut chanmon_cfgs = create_chanmon_cfgs(2);
- let feerate_per_kw = 253;
- chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
- chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
+ let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let opt_anchors = false;
+
// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
// channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
// transaction fee with 0 HTLCs (183 sats)).
let mut push_amt = 100_000_000;
- push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT) / 1000 * 1000;
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
- + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1;
+ + feerate_per_kw as u64 * htlc_success_tx_weight(opt_anchors) / 1000 * 1000 - 1;
// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
// commitment transaction fee.
let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
+	// Send MIN_AFFORDABLE_HTLC_COUNT HTLCs to cover the initial push_msat buffer we're required to include
+ for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
+ let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ }
+
// One more than the dust amt should fail, however.
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1);
unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
}
+#[test]
+fn test_chan_init_feerate_unaffordability() {
+ // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
+ // channel reserve and feerate requirements.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let opt_anchors = false;
+
+ // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
+ // HTLC.
+ let mut push_amt = 100_000_000;
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+ assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
+ APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
+
+ // During open, we don't have a "counterparty channel reserve" to check against, so that
+ // requirement only comes into play on the open_channel handling side.
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
+ let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ open_channel_msg.push_msat += 1;
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg);
+
+ let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ match msg_events[0] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+ assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
#[test]
fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
// Test that if we receive many dust HTLCs over an inbound channel, they don't count when
let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
let feerate = get_feerate!(nodes[0], chan.2);
+ let opt_anchors = get_opt_anchors!(nodes[0], chan.2);
// Add a 2* and +1 for the fee spike reserve.
- let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1);
+ let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
// Attempt to trigger a channel reserve violation --> payment failure.
- let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2);
+ let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors);
let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
let (route_2, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_2);
assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
}
-fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64) -> u64 {
- (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
+// Returns the fee (in msat) of a commitment transaction carrying `num_htlcs` non-dust HTLCs at
+// `feerate` (sat per 1000 weight units). The base weight now depends on whether anchor outputs
+// are in use, hence the new `opt_anchors` parameter. The trailing `/ 1000 * 1000` deliberately
+// rounds the fee down to a whole satoshi before expressing it in msat.
+fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, opt_anchors: bool) -> u64 {
+ (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
}
#[test]
let feemsat = 239; // set above
let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
let feerate = get_feerate!(nodes[0], chan_1.2);
+ let opt_anchors = get_opt_anchors!(nodes[0], chan_1.2);
let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
// 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
// Also, ensure that each payment has enough to be over the dust limit to
// ensure it'll be included in each commit tx fee calculation.
- let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1);
+ let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
break;
// the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
// is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
// policy.
- let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1);
+ let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
let amt_msat_1 = recv_value_1 + total_fee_msat;
}
// split the rest to test holding cell
- let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1);
+ let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
- let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1);
+ let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, opt_anchors);
let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
- let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1);
+ let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
check_added_monitors!(nodes[0], 1);
let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
- expect_payment_sent!(nodes[0], payment_preimage_1);
+ expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
check_added_monitors!(nodes[0], 1);
let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
- expect_payment_sent!(nodes[0], payment_preimage_2);
+ expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
// resolve the second HTLC from A's point of view.
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors!(nodes[0], 1);
+ expect_payment_path_successful!(nodes[0]);
let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
// Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors!(nodes[0], 1);
+ expect_payment_path_successful!(nodes[0]);
let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
check_spends!(node_txn[0], revoked_local_txn[0]);
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
- if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
- } else { panic!(); }
- if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] {
- } else { panic!(); }
+ let chan_id = Some(chan_1.2);
+ match forwarded_events[1] {
+ Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+ assert_eq!(fee_earned_msat, Some(1000));
+ assert_eq!(source_channel_id, chan_id);
+ assert_eq!(claim_from_onchain_tx, true);
+ },
+ _ => panic!()
+ }
+ match forwarded_events[2] {
+ Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
+ assert_eq!(fee_earned_msat, Some(1000));
+ assert_eq!(source_channel_id, chan_id);
+ assert_eq!(claim_from_onchain_tx, true);
+ },
+ _ => panic!()
+ }
let events = nodes[1].node.get_and_clear_pending_msg_events();
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 3);
+ assert_eq!(events.len(), 5);
let mut first_claimed = false;
for event in events {
match event {
assert_eq!(payment_hash, payment_hash_2);
}
},
+ Event::PaymentPathSuccessful { .. } => {},
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
_ => panic!("Unexpected event"),
}
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+ assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
+ assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
match events[0] {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
_ => panic!("Unexepected event"),
}
if !deliver_bs_raa {
match events[2] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, fourth_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[3] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
match events[if deliver_bs_raa { 2 } else { 1 }] {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
assert_eq!(channel_id, chan_2.2);
- assert_eq!(data.as_str(), "Commitment or closing transaction was confirmed on chain.");
+ assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
},
_ => panic!("Unexpected event"),
}
check_added_monitors!(nodes[1], 1);
let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
- expect_payment_sent!(nodes[0], payment_preimage);
+ expect_payment_sent_without_paths!(nodes[0], payment_preimage);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
- assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ expect_payment_path_successful!(nodes[0]);
+}
+
+#[test]
+fn test_peer_disconnected_before_funding_broadcasted() {
+ // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
+ // before the funding transaction has been broadcasted.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
+ // broadcasted, even though it's created by `nodes[0]`.
+ let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+ let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+ let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+ let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ assert_eq!(temporary_channel_id, expected_temporary_channel_id);
+
+ // Hand the funding tx to the ChannelManager; this queues a `FundingCreated` message for
+ // `nodes[1]` (asserted below) but does not broadcast anything on its own.
+ assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).is_ok());
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
+
+ // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
+ // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
+ // broadcasted.
+ {
+ assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
+ }
+
+ // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
+ // disconnected before the funding transaction was broadcasted.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
}
#[test]
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
{
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 2);
+ assert_eq!(events.len(), 3);
match events[0] {
Event::PaymentSent { payment_preimage, payment_hash, .. } => {
assert_eq!(payment_preimage, payment_preimage_3);
},
_ => panic!("Unexpected event"),
}
+ match events[2] {
+ Event::PaymentPathSuccessful { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
}
claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
if messages_delivered < 2 {
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
if messages_delivered < 1 {
- let events_4 = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events_4.len(), 1);
- match events_4[0] {
- Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
- assert_eq!(payment_preimage_1, *payment_preimage);
- assert_eq!(payment_hash_1, *payment_hash);
- },
- _ => panic!("Unexpected event"),
- }
+ expect_payment_sent!(nodes[0], payment_preimage_1);
} else {
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
+ if messages_delivered == 1 || messages_delivered == 2 {
+ expect_payment_path_successful!(nodes[0]);
+ }
+
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ if messages_delivered > 2 {
+ expect_payment_path_successful!(nodes[0]);
+ }
+
// Channel should still work fine...
let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
confirm_transaction(&nodes[0], &tx);
let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
- let chan_id;
- assert_eq!(events_1.len(), 1);
- match events_1[0] {
- MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[1].node.get_our_node_id());
- chan_id = msg.channel_id;
- },
- _ => panic!("Unexpected event"),
- }
+ assert!(events_1.is_empty());
reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
confirm_transaction(&nodes[1], &tx);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events_2.len(), 2);
- let funding_locked = match events_2[0] {
+ assert!(events_2.is_empty());
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ // nodes[0] hasn't yet received a funding_locked, so it only sends that on reconnect.
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+ let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_3.len(), 1);
+ let as_funding_locked = match events_3[0] {
+ MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ msg.clone()
+ },
+ _ => panic!("Unexpected event {:?}", events_3[0]),
+ };
+
+ // nodes[1] received nodes[0]'s funding_locked on the first reconnect above, so it should send
+ // announcement_signatures as well as channel_update.
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
+ let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_4.len(), 3);
+ let chan_id;
+ let bs_funding_locked = match events_4[0] {
MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ chan_id = msg.channel_id;
msg.clone()
},
- _ => panic!("Unexpected event"),
+ _ => panic!("Unexpected event {:?}", events_4[0]),
};
- let bs_announcement_sigs = match events_2[1] {
+ let bs_announcement_sigs = match events_4[1] {
MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
msg.clone()
},
- _ => panic!("Unexpected event"),
+ _ => panic!("Unexpected event {:?}", events_4[1]),
};
+ match events_4[2] {
+ MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event {:?}", events_4[2]),
+ }
- reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ // Re-deliver nodes[0]'s funding_locked, which nodes[1] can safely ignore. It currently
+ // generates a duplicative private channel_update
+ nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked);
+ let events_5 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_5.len(), 1);
+ match events_5[0] {
+ MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event {:?}", events_5[0]),
+ };
- nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked);
- nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
- let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events_3.len(), 2);
- let as_announcement_sigs = match events_3[0] {
+ // When we deliver nodes[1]'s funding_locked, however, nodes[0] will generate its
+ // announcement_signatures.
+ nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding_locked);
+ let events_6 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_6.len(), 1);
+ let as_announcement_sigs = match events_6[0] {
MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
msg.clone()
},
- _ => panic!("Unexpected event"),
+ _ => panic!("Unexpected event {:?}", events_6[0]),
};
- let (as_announcement, as_update) = match events_3[1] {
+
+ // When we deliver nodes[1]'s announcement_signatures to nodes[0], nodes[0] should immediately
+ // broadcast the channel announcement globally, as well as re-send its (now-public)
+ // channel_update.
+ nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
+ let events_7 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_7.len(), 1);
+ let (chan_announcement, as_update) = match events_7[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
(msg.clone(), update_msg.clone())
},
- _ => panic!("Unexpected event"),
+ _ => panic!("Unexpected event {:?}", events_7[0]),
};
+ // Finally, deliver nodes[0]'s announcement_signatures to nodes[1] and make sure it creates the
+ // same channel_announcement.
nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
- let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events_4.len(), 1);
- let (_, bs_update) = match events_4[0] {
+ let events_8 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_8.len(), 1);
+ let bs_update = match events_8[0] {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
- (msg.clone(), update_msg.clone())
+ assert_eq!(*msg, chan_announcement);
+ update_msg.clone()
},
- _ => panic!("Unexpected event"),
+ _ => panic!("Unexpected event {:?}", events_8[0]),
};
- nodes[0].net_graph_msg_handler.handle_channel_announcement(&as_announcement).unwrap();
+ // Provide the channel announcement and public updates to the network graph
+ nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).unwrap();
nodes[0].net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
nodes[0].net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
- // as_announcement should be re-generated exactly by broadcast_node_announcement.
+ // The channel announcement should be re-generated exactly by broadcast_node_announcement.
nodes[0].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
let msgs = nodes[0].node.get_and_clear_pending_msg_events();
let mut found_announcement = false;
for event in msgs.iter() {
match event {
MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => {
- if *msg == as_announcement { found_announcement = true; }
+ if *msg == chan_announcement { found_announcement = true; }
},
MessageSendEvent::BroadcastNodeAnnouncement { .. } => {},
_ => panic!("Unexpected event"),
assert!(found_announcement);
}
+#[test]
+fn test_funding_locked_without_best_block_updated() {
+ // Previously, if we were offline when a funding transaction was locked in, and then we came
+ // back online, calling best_block_updated once followed by transactions_confirmed, we'd not
+ // generate a funding_locked until a later best_block_updated. This tests that we generate the
+ // funding_locked immediately instead.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ // NOTE(review): relies on `BestBlockFirstSkippingBlocks` making `connect_blocks` advance the
+ // tip via `best_block_updated` without delivering individual block transactions — confirm
+ // against the connect-style handling in the test utilities.
+ *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
+
+ let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+
+ // Capture the height at which the funding tx will be considered confirmed, then advance the
+ // chain tip well past it before the tx itself is reported.
+ let conf_height = nodes[0].best_block_info().1 + 1;
+ connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+ let block_txn = [funding_tx];
+ let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
+ let conf_block_header = nodes[0].get_block_header(conf_height);
+ // Only now report the funding transaction as confirmed, in the earlier `conf_height` block.
+ nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
+
+ // Ensure nodes[0] generates a funding_locked after the transactions_confirmed
+ let as_funding_locked = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked);
+}
+
#[test]
fn test_drop_messages_peer_disconnect_dual_htlc() {
// Test that we can handle reconnecting when both sides of a channel have pending
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[0], 1);
+ expect_payment_path_successful!(nodes[0]);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
let payment_id = PaymentId([42; 32]);
- nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200000, cur_height, payment_id, &None).unwrap();
+ nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200000, cur_height, payment_id, &None).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
}
check_added_monitors!(nodes[1], 0);
- connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
+ connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
connect_blocks(&nodes[1], 1);
}
expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
} else {
- expect_payment_failed!(nodes[1], second_payment_hash, true);
- }
-}
-
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] {
+ assert_eq!(*payment_hash, second_payment_hash);
+ } else { panic!("Unexpected event"); }
+ if let Event::PaymentFailed { ref payment_hash, .. } = events[1] {
+ assert_eq!(*payment_hash, second_payment_hash);
+ } else { panic!("Unexpected event"); }
+ }
+}
+
#[test]
fn test_holding_cell_htlc_add_timeouts() {
do_test_holding_cell_htlc_add_timeouts(false);
assert_eq!(nodes[0].node.list_channels().len(), 1);
check_added_monitors!(nodes[0], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
- let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, channel_value, 42);
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42);
node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
check_added_monitors!(node_a, 0);
assert_eq!(nodes[0].node.list_channels().len(), 1);
check_added_monitors!(nodes[0], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
//... and we can even still claim the payment!
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
- nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 1);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
nodes[1].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
nodes[0].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
mine_transaction(&nodes[1], &node_txn[0]);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
}
// Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx
assert_eq!(node_txn.len(), 3);
check_spends!(node_txn[0], commitment_tx[0]);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 2);
assert_eq!(node_txn[0].input.len(), 2);
check_spends!(node_txn[0], revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
// The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
// including the one already spent by revoked_htlc_txn[1]. That's OK, we'll spend with valid
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
- let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(revoked_htlc_txn.len(), 2);
assert_eq!(revoked_htlc_txn[0].input.len(), 1);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
// The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+ Event::PaymentForwarded { fee_earned_msat, source_channel_id, claim_from_onchain_tx } => {
assert_eq!(fee_earned_msat, Some(1000));
+ assert_eq!(source_channel_id, Some(chan_1.2));
assert_eq!(claim_from_onchain_tx, true);
},
_ => panic!("Unexpected event"),
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
mine_transaction(&nodes[1], &commitment_tx[0]);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
- let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
// ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
assert_eq!(b_txn.len(), 3);
check_spends!(b_txn[1], chan_1.3);
let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000);
- let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, 0).unwrap();
+ let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200).unwrap();
// We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
// script push size limit so that the below script length checks match
// ACCEPTED_HTLC_SCRIPT_WEIGHT.
- let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], vec![], 900000, TEST_FINAL_CLTV - 40);
+ let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known());
+ let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 900000, TEST_FINAL_CLTV - 40);
send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 900000, duplicate_payment_hash, payment_secret);
let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
// Note that the fee paid is effectively double as the HTLC value (including the nodes[1] fee
// and nodes[2] fee) is rounded down and then claimed in full.
mine_transaction(&nodes[1], &htlc_success_txn[0]);
- expect_payment_forwarded!(nodes[1], Some(196*2), true);
+ expect_payment_forwarded!(nodes[1], nodes[0], Some(196*2), true);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
// 2nd HTLC:
- send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
+ send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee
// 3rd HTLC:
- send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
+ send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee
// 4th HTLC:
let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
// 5th HTLC:
let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
// 6th HTLC:
- send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, 0).unwrap());
+ send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200).unwrap());
// 7th HTLC:
- send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, 0).unwrap());
+ send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200).unwrap());
// 8th HTLC:
let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
// 9th HTLC:
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
- send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
+ send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200).unwrap()); // not added < dust limit + HTLC tx fee
// 10th HTLC:
let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 11th HTLC:
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
- send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, 0).unwrap());
+ send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200).unwrap());
// Double-check that six of the new HTLC were added
// We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
let seed = [42; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
- let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, node_seed: seed, features: InitFeatures::known() };
+ let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, network_graph: &chanmon_cfgs[0].network_graph, node_seed: seed, features: InitFeatures::known() };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
node_cfgs.insert(0, node);
check_spends!(local_txn_1[0], chan_1.3);
// We check funding pubkey are unique
- let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][36..69]));
- let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][36..69]));
+ let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
+ let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
if from_0_funding_key_0 == from_1_funding_key_0
|| from_0_funding_key_0 == from_1_funding_key_1
|| from_0_funding_key_1 == from_1_funding_key_0
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
+ let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
// Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
// present in B's local commitment transaction, but none of A's commitment transactions.
- assert!(nodes[1].node.claim_funds(our_payment_preimage));
+ assert!(nodes[1].node.claim_funds(payment_preimage));
check_added_monitors!(nodes[1], 1);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentSent { payment_preimage, payment_hash, .. } => {
- assert_eq!(payment_preimage, our_payment_preimage);
- assert_eq!(payment_hash, our_payment_hash);
- },
- _ => panic!("Unexpected event"),
- }
+ expect_payment_sent_without_paths!(nodes[0], payment_preimage);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
} else {
- expect_payment_failed!(nodes[0], our_payment_hash, true);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] {
+ assert_eq!(*payment_hash, our_payment_hash);
+ } else { panic!("Unexpected event"); }
+ if let Event::PaymentFailed { ref payment_hash, .. } = events[1] {
+ assert_eq!(*payment_hash, our_payment_hash);
+ } else { panic!("Unexpected event"); }
}
}
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- //Force duplicate channel ids
+ // Force duplicate randomness for every get-random call
for node in nodes.iter() {
- *node.keys_manager.override_channel_id_priv.lock().unwrap() = Some([0; 32]);
+ *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]);
}
// BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- //Create a second channel with a channel_id collision
- assert!(nodes[0].node.create_channel(nodes[0].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
+ // Create a second channel with the same random values. This used to panic due to a colliding
+ // channel_id, but now panics due to a colliding outbound SCID alias.
+ assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
}
#[test]
let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], chan.2);
+ let opt_anchors = get_opt_anchors!(nodes[0], chan.2);
// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
- let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1);
+ let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
// Send a payment which passes reserve checks but gets stuck in the holding cell.
let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], chan.2);
+ let opt_anchors = get_opt_anchors!(nodes[0], chan.2);
// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
let amt_1 = 20000;
- let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1) - amt_1;
+ let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors) - amt_1;
let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
- assert_eq!(*payment_preimage, payment_preimage_1);
- assert_eq!(*payment_hash, payment_hash_1);
- }
- _ => panic!("Unexpected event"),
- }
+ expect_payment_sent!(nodes[0], payment_preimage_1);
}
// Test that if we fail to forward an HTLC that is being freed from the holding cell that the
let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], chan_0_1.2);
+ let opt_anchors = get_opt_anchors!(nodes[0], chan_0_1.2);
// Send a payment which passes reserve checks but gets stuck in the holding cell.
let feemsat = 239;
let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
- let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1) - total_routing_fee_msat;
+ let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors) - total_routing_fee_msat;
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
let payment_event = {
nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
- let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], vec![], 100000000, 500000001);
+ let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known());
+ let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000, 0);
+ route.paths[0].last_mut().unwrap().cltv_expiry_delta = 500000001;
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::RouteError { ref err },
assert_eq!(err, &"Channel CLTV overflowed?"));
}
let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], chan.2);
+ let opt_anchors = get_opt_anchors!(nodes[0], chan.2);
// The 2* and +1 are for the fee spike reserve.
- let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1);
+ let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
//Disconnect and Reconnect
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
- if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0, &low_our_to_self_config) {
+ if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+ &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0,
+ &low_our_to_self_config, 0, 42)
+ {
match error {
APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
+ if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+ &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0,
+ &low_our_to_self_config, 0, &nodes[0].logger, 42)
+ {
match error {
ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
+ if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+ &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0,
+ &high_their_to_self_config, 0, &nodes[0].logger, 42)
+ {
match error {
ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
logger = test_utils::TestLogger::with_id(format!("node {}", 0));
let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
chain_source = test_utils::TestChainSource::new(Network::Testnet);
- tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+ tx_broadcaster = test_utils::TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new())) };
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
persister = test_utils::TestPersister::new();
monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
check_added_monitors!(nodes[0], 1);
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
}
// Check we close channel detecting A is fallen-behind
+ // Check that we sent the warning message when we detected that A has fallen behind,
+ // and give the possibility for A to recover from the warning.
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
- assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
- check_added_monitors!(nodes[1], 1);
+ let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+ assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
// Check A is able to claim to_remote output
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
- assert_eq!(node_txn.len(), 1);
- check_spends!(node_txn[0], chan.3);
- assert_eq!(node_txn[0].output.len(), 2);
- mine_transaction(&nodes[0], &node_txn[0]);
- connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
- let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
- assert_eq!(spend_txn.len(), 1);
- check_spends!(spend_txn[0], node_txn[0]);
+ let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ // The node B should not broadcast the transaction to force close the channel!
+ assert!(node_txn.is_empty());
+ // B should now detect that there is something wrong and should force close the channel.
+ let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
+
+ // After the warning message sent by B, we should not be able to
+ // use the channel, or successfully reconnect to the channel.
+ assert!(nodes[0].node.list_usable_channels().is_empty());
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+ let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+ let mut err_msgs_0 = Vec::with_capacity(1);
+ for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+ if let MessageSendEvent::HandleError { ref action, .. } = msg {
+ match action {
+ &ErrorAction::SendErrorMessage { ref msg } => {
+ assert_eq!(msg.data, "Failed to find corresponding channel");
+ err_msgs_0.push(msg.clone());
+ },
+ _ => panic!("Unexpected event!"),
+ }
+ } else {
+ panic!("Unexpected event!");
+ }
+ }
+ assert_eq!(err_msgs_0.len(), 1);
+ nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+ assert!(nodes[1].node.list_usable_channels().is_empty());
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
+ check_closed_broadcast!(nodes[1], false);
}
#[test]
// Create some initial channels
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let scorer = Scorer::with_fixed_penalty(0);
- let payee = Payee::new(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known());
- let route = get_route(&nodes[0].node.get_our_node_id(), &payee, &nodes[0].net_graph_msg_handler.network_graph, None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap();
+ let scorer = test_utils::TestScorer::with_penalty(0);
+ let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
+ let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known());
+ let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
- let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, 0).unwrap();
+ let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200).unwrap();
nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
check_added_monitors!(nodes[0], 1);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
// Disconnect peers
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- let mut chans_disabled: HashSet<u64> = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
+ let mut chans_disabled = HashMap::new();
for e in msg_events {
match e {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
// Check that each channel gets updated exactly once
- if !chans_disabled.remove(&msg.contents.short_channel_id) {
+ if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
panic!("Generated ChannelUpdate for wrong chan!");
}
},
}
}
// Reconnect peers
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 3);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 3);
nodes[0].node.timer_tick_occurred();
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
for e in msg_events {
match e {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
- // Check that each channel gets updated exactly once
- if !chans_disabled.remove(&msg.contents.short_channel_id) {
- panic!("Generated ChannelUpdate for wrong chan!");
+ match chans_disabled.remove(&msg.contents.short_channel_id) {
+ // Each update should have a higher timestamp than the previous one, replacing
+ // the old one.
+ Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
+ None => panic!("Generated ChannelUpdate for wrong chan!"),
}
},
_ => panic!("Unexpected event"),
}
}
-}
-
-#[test]
-fn test_priv_forwarding_rejection() {
- // If we have a private channel with outbound liquidity, and
- // UserConfig::accept_forwards_to_priv_channels is set to false, we should reject any attempts
- // to forward through that channel.
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let mut no_announce_cfg = test_default_channel_config();
- no_announce_cfg.channel_options.announced_channel = false;
- no_announce_cfg.accept_forwards_to_priv_channels = false;
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(no_announce_cfg), None]);
- let persister: test_utils::TestPersister;
- let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_1_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
- let chan_id_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).2;
-
- // Note that the create_*_chan functions in utils requires announcement_signatures, which we do
- // not send for private channels.
- nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
- let open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[2].node.get_our_node_id());
- nodes[2].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel);
- let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_accept_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
-
- let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[1], 1_000_000, 42);
- nodes[1].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
- nodes[2].node.handle_funding_created(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[2].node.get_our_node_id()));
- check_added_monitors!(nodes[2], 1);
-
- let cs_funding_signed = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &cs_funding_signed);
- check_added_monitors!(nodes[1], 1);
-
- let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
- confirm_transaction_at(&nodes[1], &tx, conf_height);
- connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
- confirm_transaction_at(&nodes[2], &tx, conf_height);
- connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
- let as_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id());
- nodes[1].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
- get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
- nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
- get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
-
- assert!(nodes[0].node.list_usable_channels()[0].is_public);
- assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
- assert!(!nodes[2].node.list_usable_channels()[0].is_public);
-
- // We should always be able to forward through nodes[1] as long as its out through a public
- // channel:
- send_payment(&nodes[2], &[&nodes[1], &nodes[0]], 10_000);
-
- // ... however, if we send to nodes[2], we will have to pass the private channel from nodes[1]
- // to nodes[2], which should be rejected:
- let route_hint = RouteHint(vec![RouteHintHop {
- src_node_id: nodes[1].node.get_our_node_id(),
- short_channel_id: nodes[2].node.list_channels()[0].short_channel_id.unwrap(),
- fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 },
- cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
- htlc_minimum_msat: None,
- htlc_maximum_msat: None,
- }]);
- let last_hops = vec![route_hint];
- let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], last_hops, 10_000, TEST_FINAL_CLTV);
-
- nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
- check_added_monitors!(nodes[0], 1);
- let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
- commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
-
- let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- assert!(htlc_fail_updates.update_add_htlcs.is_empty());
- assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1);
- assert!(htlc_fail_updates.update_fail_malformed_htlcs.is_empty());
- assert!(htlc_fail_updates.update_fee.is_none());
-
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, true, true);
- expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, nodes[2].node.list_channels()[0].short_channel_id.unwrap(), true);
-
- // Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
- // to true. Sadly there is currently no way to change it at runtime.
-
- nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
- nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-
- let nodes_1_serialized = nodes[1].node.encode();
- let mut monitor_a_serialized = test_utils::TestVecWriter(Vec::new());
- let mut monitor_b_serialized = test_utils::TestVecWriter(Vec::new());
- get_monitor!(nodes[1], chan_id_1).write(&mut monitor_a_serialized).unwrap();
- get_monitor!(nodes[1], cs_funding_signed.channel_id).write(&mut monitor_b_serialized).unwrap();
-
- persister = test_utils::TestPersister::new();
- let keys_manager = &chanmon_cfgs[1].keys_manager;
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
- nodes[1].chain_monitor = &new_chain_monitor;
-
- let mut monitor_a_read = &monitor_a_serialized.0[..];
- let mut monitor_b_read = &monitor_b_serialized.0[..];
- let (_, mut monitor_a) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_a_read, keys_manager).unwrap();
- let (_, mut monitor_b) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_b_read, keys_manager).unwrap();
- assert!(monitor_a_read.is_empty());
- assert!(monitor_b_read.is_empty());
-
- no_announce_cfg.accept_forwards_to_priv_channels = true;
-
- let mut nodes_1_read = &nodes_1_serialized[..];
- let (_, nodes_1_deserialized_tmp) = {
- let mut channel_monitors = HashMap::new();
- channel_monitors.insert(monitor_a.get_funding_txo().0, &mut monitor_a);
- channel_monitors.insert(monitor_b.get_funding_txo().0, &mut monitor_b);
- <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
- default_config: no_announce_cfg,
- keys_manager,
- fee_estimator: node_cfgs[1].fee_estimator,
- chain_monitor: nodes[1].chain_monitor,
- tx_broadcaster: nodes[1].tx_broadcaster.clone(),
- logger: nodes[1].logger,
- channel_monitors,
- }).unwrap()
- };
- assert!(nodes_1_read.is_empty());
- nodes_1_deserialized = nodes_1_deserialized_tmp;
-
- assert!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a).is_ok());
- assert!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b).is_ok());
- check_added_monitors!(nodes[1], 2);
- nodes[1].node = &nodes_1_deserialized;
-
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
- nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
- nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
- get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
- get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
-
- nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
- nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
- let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id());
- let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
- nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
- nodes[1].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &cs_reestablish);
- get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
- get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
-
- nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
- check_added_monitors!(nodes[0], 1);
- pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 10_000, our_payment_hash, our_payment_secret);
- claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage);
+ // Check that each channel gets updated exactly once
+ assert!(chans_disabled.is_empty());
}
#[test]
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
- let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], vec![], 3000000, 30);
+ let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known());
+ let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000, 30);
send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
assert_eq!(node_txn[0].output.len(), 1);
check_spends!(node_txn[0], revoked_txn[0]);
let fee_1 = penalty_sum - node_txn[0].output[0].value;
- feerate_1 = fee_1 * 1000 / node_txn[0].get_weight() as u64;
+ feerate_1 = fee_1 * 1000 / node_txn[0].weight() as u64;
penalty_1 = node_txn[0].txid();
node_txn.clear();
};
// Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast
assert_ne!(penalty_2, penalty_1);
let fee_2 = penalty_sum - node_txn[0].output[0].value;
- feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
+ feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
// Verify 25% bump heuristic
assert!(feerate_2 * 100 >= feerate_1 * 125);
node_txn.clear();
// Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast
assert_ne!(penalty_3, penalty_2);
let fee_3 = penalty_sum - node_txn[0].output[0].value;
- feerate_3 = fee_3 * 1000 / node_txn[0].get_weight() as u64;
+ feerate_3 = fee_3 * 1000 / node_txn[0].weight() as u64;
// Verify 25% bump heuristic
assert!(feerate_3 * 100 >= feerate_2 * 125);
node_txn.clear();
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
// Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
- let payee = Payee::new(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known());
- let scorer = Scorer::with_fixed_penalty(0);
- let route = get_route(&nodes[0].node.get_our_node_id(), &payee, &nodes[0].net_graph_msg_handler.network_graph,
- None, 3_000_000, 50, nodes[0].logger, &scorer).unwrap();
+ let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id()).with_features(InvoiceFeatures::known());
+ let scorer = test_utils::TestScorer::with_penalty(0);
+ let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
+ let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
+ 3_000_000, 50, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
- let payee = Payee::new(nodes[0].node.get_our_node_id()).with_features(InvoiceFeatures::known());
- let route = get_route(&nodes[1].node.get_our_node_id(), &payee, &nodes[1].net_graph_msg_handler.network_graph,
- None, 3_000_000, 50, nodes[0].logger, &scorer).unwrap();
+ let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id()).with_features(InvoiceFeatures::known());
+ let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None,
+ 3_000_000, 50, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
first = node_txn[4].txid();
// Store both feerates for later comparison
let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[4].output[0].value;
- feerate_1 = fee_1 * 1000 / node_txn[4].get_weight() as u64;
+ feerate_1 = fee_1 * 1000 / node_txn[4].weight() as u64;
penalty_txn = vec![node_txn[2].clone()];
node_txn.clear();
}
// Verify bumped tx is different and 25% bump heuristic
assert_ne!(first, node_txn[0].txid());
let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[0].output[0].value;
- let feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
+ let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
assert!(feerate_2 * 100 > feerate_1 * 125);
let txn = vec![node_txn[0].clone()];
node_txn.clear();
timeout = node_txn[6].txid();
let index = node_txn[6].input[0].previous_output.vout;
let fee = remote_txn[0].output[index as usize].value - node_txn[6].output[0].value;
- feerate_timeout = fee * 1000 / node_txn[6].get_weight() as u64;
+ feerate_timeout = fee * 1000 / node_txn[6].weight() as u64;
preimage = node_txn[0].txid();
let index = node_txn[0].input[0].previous_output.vout;
let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
- feerate_preimage = fee * 1000 / node_txn[0].get_weight() as u64;
+ feerate_preimage = fee * 1000 / node_txn[0].weight() as u64;
node_txn.clear();
};
let index = preimage_bump.input[0].previous_output.vout;
let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
- let new_feerate = fee * 1000 / preimage_bump.get_weight() as u64;
+ let new_feerate = fee * 1000 / preimage_bump.weight() as u64;
assert!(new_feerate * 100 > feerate_timeout * 125);
assert_ne!(timeout, preimage_bump.txid());
let index = node_txn[0].input[0].previous_output.vout;
let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
- let new_feerate = fee * 1000 / node_txn[0].get_weight() as u64;
+ let new_feerate = fee * 1000 / node_txn[0].weight() as u64;
assert!(new_feerate * 100 > feerate_preimage * 125);
assert_ne!(preimage, node_txn[0].txid());
}
}
+#[test]
+fn test_pending_claimed_htlc_no_balance_underflow() {
+	// Tests that if we have a pending outbound HTLC as well as a claimed-but-not-fully-removed
+	// HTLC we will not underflow when we call `Channel::get_balance_msat()`.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	// 100k sat channel with 0 msat pushed, so nodes[1] starts with no local balance.
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+
+	// Pay nodes[1] 1,010k msat and have it claim, but only partially process the removal:
+	// nodes[0] handles the fulfill and commitment_signed, then we deliberately drop the
+	// resulting RAA/CS so the HTLC stays claimed-but-not-fully-removed on the channel.
+	let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_010_000).0;
+	nodes[1].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[1], 1);
+	let fulfill_ev = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &fulfill_ev.update_fulfill_htlcs[0]);
+	expect_payment_sent_without_paths!(nodes[0], payment_preimage);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &fulfill_ev.commitment_signed);
+	check_added_monitors!(nodes[0], 1);
+	let (_raa, _cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+	// At this point nodes[1] has received 1,010k msat (10k msat more than their reserve) and can
+	// send an HTLC back (though it will go in the holding cell). Send an HTLC back and check we
+	// can get our balance.
+
+	// Get a route from nodes[1] to nodes[0] by getting a route going the other way and then flip
+	// the public key of the only hop. This works around ChannelDetails not showing the
+	// almost-claimed HTLC as available balance.
+	let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000);
+	route.payment_params = None; // This is all wrong, but unnecessary
+	route.paths[0][0].pubkey = nodes[0].node.get_our_node_id();
+	let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
+	nodes[1].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+
+	// 1,010k msat received minus the 10k msat outbound HTLC sitting in the holding cell: the
+	// balance query must not underflow and should report the 1,000k msat remainder.
+	assert_eq!(nodes[1].node.list_channels()[0].balance_msat, 1_000_000);
+}
+
+#[test]
+fn test_channel_conf_timeout() {
+	// Tests that, for inbound channels, we give up on them if the funding transaction does not
+	// confirm within 2016 blocks, as recommended by BOLT 2.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Open the channel through the funding handshake but never confirm the funding tx on-chain.
+	let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000, InitFeatures::known(), InitFeatures::known());
+
+	// The outbound node should wait forever for confirmation:
+	// This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is
+	// copied here instead of directly referencing the constant.
+	connect_blocks(&nodes[0], 2016);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// The inbound node should fail the channel after exactly 2016 blocks
+	connect_blocks(&nodes[1], 2015);
+	check_added_monitors!(nodes[1], 0);
+	// NOTE(review): this re-checks nodes[0]'s (already-drained) event queue; checking nodes[1]
+	// here would presumably match the stated intent of "no events one block before the
+	// deadline" — confirm against upstream before relying on this assertion.
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// One more block puts the inbound side exactly at the deadline: it force-closes, persists a
+	// monitor update, and sends the peer an error message explaining why.
+	connect_blocks(&nodes[1], 1);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut);
+	let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(close_ev.len(), 1);
+	match close_ev[0] {
+		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			assert_eq!(msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
+		},
+		_ => panic!("Unexpected event"),
+	}
+}
+
#[test]
fn test_override_channel_config() {
let chanmon_cfgs = create_chanmon_cfgs(2);
assert_eq!(res.htlc_minimum_msat, 1);
}
+#[test]
+fn test_channel_update_has_correct_htlc_maximum_msat() {
+	// Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
+	// Bolt 7 specifies that if present `htlc_maximum_msat`:
+	// 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped to
+	// 90% of the `channel_value`.
+	// 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
+
+	// Four configs whose only interesting difference is the inbound in-flight percentage; a
+	// node's ChannelUpdate cap is driven by its *peer's* advertised in-flight limit.
+	let mut config_30_percent = UserConfig::default();
+	config_30_percent.channel_options.announced_channel = true;
+	config_30_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
+	let mut config_50_percent = UserConfig::default();
+	config_50_percent.channel_options.announced_channel = true;
+	config_50_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
+	let mut config_95_percent = UserConfig::default();
+	config_95_percent.channel_options.announced_channel = true;
+	config_95_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
+	let mut config_100_percent = UserConfig::default();
+	config_100_percent.channel_options.announced_channel = true;
+	config_100_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	// Expected caps, computed the same way (percent of channel value). f64 math is exact for
+	// these particular inputs (100_000_000 * 0.3/0.5/0.9 are integral).
+	let channel_value_satoshis = 100000;
+	let channel_value_msat = channel_value_satoshis * 1000;
+	let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
+	let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
+	let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
+
+	let (node_0_chan_update, node_1_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001, InitFeatures::known(), InitFeatures::known());
+	let (node_2_chan_update, node_3_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001, InitFeatures::known(), InitFeatures::known());
+
+	// Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
+	// that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
+	assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
+	// Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
+	// that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
+	assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
+
+	// Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
+	// the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
+	// `channel_value`.
+	assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+	// Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
+	// the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
+	// `channel_value`.
+	assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+}
+
+#[test]
+fn test_manually_accept_inbound_channel_request() {
+	// With `manually_accept_inbound_channels` set, an inbound open_channel must surface an
+	// `Event::OpenChannelRequest` and produce no `accept_channel` until the user accepts it.
+	let mut manually_accept_conf = UserConfig::default();
+	manually_accept_conf.manually_accept_inbound_channels = true;
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
+
+	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
+	// accepting the inbound channel request.
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	let events = nodes[1].node.get_and_clear_pending_events();
+	match events[0] {
+		Event::OpenChannelRequest { temporary_channel_id, .. } => {
+			// Accept with user_channel_id 23; this value should be echoed back on close below.
+			nodes[1].node.accept_inbound_channel(&temporary_channel_id, 23).unwrap();
+		}
+		_ => panic!("Unexpected event"),
+	}
+
+	// Only after the explicit accept does the accept_channel message get queued for the peer.
+	let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(accept_msg_ev.len(), 1);
+
+	match accept_msg_ev[0] {
+		MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+		}
+		_ => panic!("Unexpected event"),
+	}
+
+	// Force-close the (still-pending) channel and check the user_channel_id passed to
+	// accept_inbound_channel is reported in the resulting ChannelClosed event.
+	nodes[1].node.force_close_channel(&temp_channel_id).unwrap();
+
+	let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(close_msg_ev.len(), 1);
+
+	let events = nodes[1].node.get_and_clear_pending_events();
+	match events[0] {
+		Event::ChannelClosed { user_channel_id, .. } => {
+			assert_eq!(user_channel_id, 23);
+		}
+		_ => panic!("Unexpected event"),
+	}
+}
+
+#[test]
+fn test_manually_reject_inbound_channel_request() {
+	// Counterpart to the manual-accept test: with `manually_accept_inbound_channels` set, the
+	// user can reject a pending inbound channel by force-closing it from the
+	// `Event::OpenChannelRequest` handler, which sends an error to the peer.
+	let mut manually_accept_conf = UserConfig::default();
+	manually_accept_conf.manually_accept_inbound_channels = true;
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
+
+	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
+	// rejecting the inbound channel request.
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	let events = nodes[1].node.get_and_clear_pending_events();
+	match events[0] {
+		Event::OpenChannelRequest { temporary_channel_id, .. } => {
+			// Reject by force-closing the not-yet-accepted channel.
+			nodes[1].node.force_close_channel(&temporary_channel_id).unwrap();
+		}
+		_ => panic!("Unexpected event"),
+	}
+
+	// The rejection should result in exactly one outbound error message to the opener.
+	let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(close_msg_ev.len(), 1);
+
+	match close_msg_ev[0] {
+		MessageSendEvent::HandleError { ref node_id, .. } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+		}
+		_ => panic!("Unexpected event"),
+	}
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+}
+
+#[test]
+fn test_reject_funding_before_inbound_channel_accepted() {
+	// This tests that when `UserConfig::manually_accept_inbound_channels` is set to true, inbound
+	// channels must to be manually accepted through `ChannelManager::accept_inbound_channel` by
+	// the node operator before the counterparty sends a `FundingCreated` message. If a
+	// `FundingCreated` message is received before the channel is accepted, it should be rejected
+	// and the channel should be closed.
+	let mut manually_accept_conf = UserConfig::default();
+	manually_accept_conf.manually_accept_inbound_channels = true;
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	let temp_channel_id = res.temporary_channel_id;
+
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
+
+	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`.
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Clear the `Event::OpenChannelRequest` event without responding to the request.
+	nodes[1].node.get_and_clear_pending_events();
+
+	// Get the `AcceptChannel` message of `nodes[1]` without calling
+	// `ChannelManager::accept_inbound_channel`, which generates a
+	// `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
+	// `handle_accept_channel`, which is required in order for `create_funding_transaction` to
+	// succeed when `nodes[0]` is passed to it.
+	{
+		let mut lock;
+		// Reach directly into nodes[1]'s channel state — a test-only shortcut that simulates a
+		// misbehaving/premature counterparty without going through the public accept path.
+		let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id);
+		let accept_chan_msg = channel.get_accept_channel_message();
+		nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
+	}
+
+	let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+
+	nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+	// The `funding_created_msg` should be rejected by `nodes[1]` as it hasn't accepted the channel
+	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+
+	let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(close_msg_ev.len(), 1);
+
+	// The error message must name the offending channel and carry the exact rejection reason;
+	// the same string is expected in the ClosureReason below.
+	let expected_err = "FundingCreated message received before the channel was accepted";
+	match close_msg_ev[0] {
+		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => {
+			assert_eq!(msg.channel_id, temp_channel_id);
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			assert_eq!(msg.data, expected_err);
+		}
+		_ => panic!("Unexpected event"),
+	}
+
+	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+}
+
+#[test]
+fn test_can_not_accept_inbound_channel_twice() {
+	// A second call to `accept_inbound_channel` for the same pending channel must fail with an
+	// APIMisuseError, and must NOT close or otherwise disturb the already-accepted channel.
+	let mut manually_accept_conf = UserConfig::default();
+	manually_accept_conf.manually_accept_inbound_channels = true;
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
+
+	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
+	// accepting the inbound channel request.
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	let events = nodes[1].node.get_and_clear_pending_events();
+	match events[0] {
+		Event::OpenChannelRequest { temporary_channel_id, .. } => {
+			// First accept succeeds; the immediate second accept must be rejected because the
+			// channel is no longer awaiting acceptance.
+			nodes[1].node.accept_inbound_channel(&temporary_channel_id, 0).unwrap();
+			let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, 0);
+			match api_res {
+				Err(APIError::APIMisuseError { err }) => {
+					assert_eq!(err, "The channel isn't currently awaiting to be accepted.");
+				},
+				Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
+				Err(_) => panic!("Unexpected Error"),
+			}
+		}
+		_ => panic!("Unexpected event"),
+	}
+
+	// Ensure that the channel wasn't closed after attempting to accept it twice.
+	let accept_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(accept_msg_ev.len(), 1);
+
+	match accept_msg_ev[0] {
+		MessageSendEvent::SendAcceptChannel { ref node_id, .. } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+		}
+		_ => panic!("Unexpected event"),
+	}
+}
+
+#[test]
+fn test_can_not_accept_unknown_inbound_channel() {
+	// Accepting a channel id we have no pending channel for must fail with ChannelUnavailable.
+	// Single-node setup: no channels exist at all, so any id is "unknown".
+	let chanmon_cfg = create_chanmon_cfgs(1);
+	let node_cfg = create_node_cfgs(1, &chanmon_cfg);
+	let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
+	let node = create_network(1, &node_cfg, &node_chanmgr)[0].node;
+
+	let unknown_channel_id = [0; 32];
+	let api_res = node.accept_inbound_channel(&unknown_channel_id, 0);
+	match api_res {
+		Err(APIError::ChannelUnavailable { err }) => {
+			assert_eq!(err, "Can't accept a channel that doesn't exist");
+		},
+		// (typo "unkown" below is in a test-local panic string and has no runtime effect)
+		Ok(_) => panic!("It shouldn't be possible to accept an unkown channel"),
+		Err(_) => panic!("Unexpected Error"),
+	}
+}
+
#[test]
fn test_simple_mpp() {
// Simple test of sending a multi-path payment.
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
{
- let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, 42);
+ let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200).unwrap();
let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
check_added_monitors!(nodes[0], 1);
match events[0] {
Event::PaymentReceived { ref purpose, .. } => {
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, user_payment_id, .. } => {
- assert_eq!(*user_payment_id, 42);
+ PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
},
_ => panic!("expected PaymentPurpose::InvoicePayment")
}
#[test]
+#[allow(deprecated)]
fn test_secret_timeout() {
- // Simple test of payment secret storage time outs
+ // Simple test of payment secret storage time outs. After
+ // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0);
+ let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap();
// We should fail to register the same payment hash twice, at least until we've connected a
// block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
- if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) {
+ if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
assert_eq!(err, "Duplicate payment hash");
} else { panic!(); }
let mut block = {
}
};
connect_block(&nodes[1], &block);
- if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) {
+ if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
assert_eq!(err, "Duplicate payment hash");
} else { panic!(); }
// If we then connect the second block, we should be able to register the same payment hash
- // again with a different user_payment_id (this time getting a new payment secret).
+ // again (this time getting a new payment secret).
block.header.prev_blockhash = block.header.block_hash();
block.header.time += 1;
connect_block(&nodes[1], &block);
- let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 42).unwrap();
+ let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap();
assert_ne!(payment_secret_1, our_payment_secret);
{
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, user_payment_id }, .. } => {
+ Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => {
assert!(payment_preimage.is_none());
- assert_eq!(user_payment_id, 42);
assert_eq!(payment_secret, our_payment_secret);
// We don't actually have the payment preimage with which to claim this payment!
},
let random_payment_hash = PaymentHash([42; 32]);
let random_payment_secret = PaymentSecret([43; 32]);
- let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0);
+ let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2).unwrap();
let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
// All the below cases should end up being handled exactly identically, so we macro the
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
// Move the first channel through the funding flow...
- let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 100000, 42);
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
check_added_monitors!(nodes[0], 0);
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
- let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
- expect_payment_forwarded!(nodes[1], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
+ expect_payment_forwarded!(nodes[1], nodes[0], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
if !go_onchain_before_fulfill && broadcast_alice {
let events = nodes[1].node.get_and_clear_pending_msg_events();
}
// Move the first channel through the funding flow...
- let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
check_added_monitors!(nodes[0], 0);
let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_2_msg);
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
- create_funding_transaction(&nodes[0], 100000, 42); // Get and check the FundingGenerationReady event
+ create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
let funding_created = {
let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
- let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], 100_000, 42);
+ let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
for output in tx.output.iter_mut() {
// Make the confirmed funding transaction have a bogus script_pubkey
output.script_pubkey = bitcoin::Script::new();
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ let expected_err = "funding tx had wrong script/value or output index";
confirm_transaction_at(&nodes[1], &tx, 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
check_added_monitors!(nodes[1], 1);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
if let msgs::ErrorAction::SendErrorMessage { msg } = action {
- assert_eq!(msg.data, "funding tx had wrong script/value or output index");
+ assert_eq!(msg.data, "Channel closed because of an exception: ".to_owned() + expected_err);
} else { panic!(); }
} else { panic!(); }
assert_eq!(nodes[1].node.list_channels().len(), 0);
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
}
+// Delivers two HTLCs for the same payment hash/secret where the second overruns the expected
+// payment amount. The recipient should auto-fail only the second HTLC. When
+// `test_for_second_fail_panic` is set, the user additionally fails the first HTLC backwards,
+// exercising the path that previously hit a "tried to fail an already failed HTLC" debug panic.
+fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+ let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known());
+ let route = get_route!(nodes[0], payment_params, 10_000, TEST_FINAL_CLTV).unwrap();
+
+ let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+
+ // First HTLC: delivered and fully committed; nodes[1] generates a PaymentReceived below.
+ {
+ nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let mut payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ }
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
+
+ // Second HTLC: same hash/secret, pushing the received total over the expected amount.
+ {
+ nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let mut payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ // At this point, nodes[1] would notice it has too much value for the payment. It will
+ // assume the second is a privacy attack (no longer particularly relevant
+ // post-payment_secrets) and fail back the new HTLC. Previously, it'd also have failed back
+ // the first HTLC delivered above.
+ }
+
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ nodes[1].node.process_pending_htlc_forwards();
+
+ if test_for_second_fail_panic {
+ // Now we go fail back the first HTLC from the user end.
+ nodes[1].node.fail_htlc_backwards(&our_payment_hash);
+
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ nodes[1].node.process_pending_htlc_forwards();
+
+ check_added_monitors!(nodes[1], 1);
+ // Both HTLCs (the auto-failed second and the user-failed first) come back in one update.
+ let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
+ commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+ let failure_events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(failure_events.len(), 2);
+ if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
+ if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+ } else {
+ // Let the second HTLC fail and claim the first
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ nodes[1].node.process_pending_htlc_forwards();
+
+ check_added_monitors!(nodes[1], 1);
+ let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+ expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+ // The first HTLC is still claimable with the preimage, proving it was never failed.
+ claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
+ }
+}
+
+#[test]
+fn test_dup_htlc_second_fail_panic() {
+ // Previously, if we received two HTLCs back-to-back, where the second overran the expected
+ // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
+ // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
+ // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
+ do_test_dup_htlc_second_rejected(true);
+}
+
+#[test]
+fn test_dup_htlc_second_rejected() {
+ // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
+ // simply reject the second HTLC but are still able to claim the first HTLC.
+ // (Shares its body with `test_dup_htlc_second_fail_panic` via the `false` parameter.)
+ do_test_dup_htlc_second_rejected(false);
+}
+
+#[test]
+fn test_inconsistent_mpp_params() {
+ // Test that if we receive two HTLCs with different payment parameters we fail back the first
+ // such HTLC and allow the second to stay.
+ // Topology: 0 -> {1, 2} -> 3, giving two disjoint paths for a two-part MPP payment.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+
+ let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known());
+ let mut route = get_route!(nodes[0], payment_params, 15_000_000, TEST_FINAL_CLTV).unwrap();
+ assert_eq!(route.paths.len(), 2);
+ route.paths.sort_by(|path_a, _| {
+ // Sort the path so that the path through nodes[1] comes first
+ if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+ core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+ });
+ let payment_params_opt = Some(payment_params);
+
+ let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+
+ let cur_height = nodes[0].best_block_info().1;
+ let payment_id = PaymentId([42; 32]);
+ // First part: sent along path via nodes[1] with total_msat = 15_000_000.
+ {
+ nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
+ }
+ assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
+
+ // Second part: sent along path via nodes[2] but with a MISMATCHED total_msat (14_000_000).
+ {
+ nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+ nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+
+ let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+ nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[3], 0);
+ commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
+
+ // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
+ // amount. It will assume the second is a privacy attack (no longer particularly relevant
+ // post-payment_secrets) and fail back the new HTLC.
+ }
+ expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+ nodes[3].node.process_pending_htlc_forwards();
+ expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+ nodes[3].node.process_pending_htlc_forwards();
+
+ check_added_monitors!(nodes[3], 1);
+
+ // The failure propagates back 3 -> 2 -> 0.
+ let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+
+ let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
+
+ expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+ // Retry the second part with the consistent total_msat; the payment then completes.
+ nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
+
+ claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
+}
+
#[test]
fn test_keysend_payments_to_public_node() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
- let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+ let network_graph = nodes[0].network_graph;
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- let params = RouteParameters {
- payee: Payee::for_keysend(payee_pubkey),
+ let route_params = RouteParameters {
+ payment_params: PaymentParameters::for_keysend(payee_pubkey),
final_value_msat: 10000,
final_cltv_expiry_delta: 40,
};
- let scorer = Scorer::with_fixed_penalty(0);
- let route = find_route(&payer_pubkey, ¶ms, &network_graph, None, nodes[0].logger, &scorer).unwrap();
+ let scorer = test_utils::TestScorer::with_penalty(0);
+ let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
+ let route = find_route(&payer_pubkey, &route_params, network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
- let params = RouteParameters {
- payee: Payee::for_keysend(payee_pubkey),
+ let route_params = RouteParameters {
+ payment_params: PaymentParameters::for_keysend(payee_pubkey),
final_value_msat: 10000,
final_cltv_expiry_delta: 40,
};
- let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+ let network_graph = nodes[0].network_graph;
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = Scorer::with_fixed_penalty(0);
+ let scorer = test_utils::TestScorer::with_penalty(0);
+ let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
- &payer_pubkey, ¶ms, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer
+ &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+ nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
claim_payment(&nodes[0], &path, test_preimage);
}
+
+/// The possible events which may trigger a `max_dust_htlc_exposure` breach.
+/// Used by `do_test_max_dust_htlc_exposure` to select which code path is driven past the limit.
+#[derive(Clone, Copy, PartialEq)]
+enum ExposureEvent {
+ /// Breach occurs at HTLC forwarding (see `send_htlc`)
+ AtHTLCForward,
+ /// Breach occurs at HTLC reception (see `update_add_htlc`)
+ AtHTLCReception,
+ /// Breach occurs at outbound update_fee (see `send_update_fee`)
+ AtUpdateFeeOutbound,
+}
+
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) {
+ // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
+ // policy.
+ //
+ // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust HTLC inbound and
+ // trimmed-to-dust HTLC outbound balance and this new payment as included on next
+ // counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll reject the
+ // update. At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust HTLC
+ // inbound and trimmed-to-dust HTLC outbound balance and this new received HTLC as included
+ // on next counterparty commitment are above our `max_dust_htlc_exposure_msat`, we'll fail
+ // the update. Note, we return a `temporary_channel_failure` (0x1000 | 7), as the channel
+ // might be available again for HTLC processing once the dust bandwidth has cleared up.
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut config = test_default_channel_config();
+ config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Open the channel by hand so we can tweak the open_channel message before delivery.
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+ let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ open_channel.max_htlc_value_in_flight_msat = 50_000_000;
+ open_channel.max_accepted_htlcs = 60;
+ if on_holder_tx {
+ open_channel.dust_limit_satoshis = 546;
+ }
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+ let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+ let opt_anchors = false;
+
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+ if on_holder_tx {
+ // Raise our own dust limit to match, so the holder-tx thresholds below are symmetric.
+ if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
+ chan.holder_dust_limit_satoshis = 546;
+ }
+ }
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ check_added_monitors!(nodes[0], 1);
+
+ let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+ update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
+
+ let dust_buffer_feerate = {
+ let chan_lock = nodes[0].node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&channel_id).unwrap();
+ chan.get_dust_buffer_feerate(None) as u64
+ };
+ // Per-HTLC msat values sized to sit just under the relevant trimmed-to-dust threshold, and
+ // the counts of such HTLCs that fit under the 5_000_000 msat exposure cap.
+ let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+ let dust_outbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
+
+ let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+ let dust_inbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+
+ let dust_htlc_on_counterparty_tx: u64 = 25;
+ let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+
+ // Fill the channel with dust HTLCs right up to (but not over) the exposure limit.
+ if on_holder_tx {
+ if dust_outbound_balance {
+ // Outbound dust threshold: 2223 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
+ // Outbound dust balance: 4372 sats
+ // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2132 sats
+ for i in 0..dust_outbound_htlc_on_holder_tx {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_outbound_htlc_on_holder_tx_msat);
+ if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
+ }
+ } else {
+ // Inbound dust threshold: 2324 sats (`dust_buffer_feerate` * HTLC_SUCCESS_TX_WEIGHT / 1000 + holder's `dust_limit_satoshis`)
+ // Inbound dust balance: 4372 sats
+ // Note, we need sent payment to be above outbound dust threshold on counterparty_tx of 2031 sats
+ for _ in 0..dust_inbound_htlc_on_holder_tx {
+ route_payment(&nodes[1], &[&nodes[0]], dust_inbound_htlc_on_holder_tx_msat);
+ }
+ }
+ } else {
+ if dust_outbound_balance {
+ // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
+ // Outbound dust balance: 5000 sats
+ for i in 0..dust_htlc_on_counterparty_tx {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
+ if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
+ }
+ } else {
+ // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
+ // Inbound dust balance: 5000 sats
+ for _ in 0..dust_htlc_on_counterparty_tx {
+ route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
+ }
+ }
+ }
+
+ // One more dust HTLC should now push the exposure over the limit; check the rejection path
+ // selected by `exposure_breach_event`.
+ let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1);
+ if exposure_breach_event == ExposureEvent::AtHTLCForward {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
+ let mut config = UserConfig::default();
+ // With default dust exposure: 5000 sats
+ if on_holder_tx {
+ let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1);
+ let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat;
+ unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat)));
+ } else {
+ unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat)));
+ }
+ } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
+ nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.remove(0));
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ // With default dust exposure: 5000 sats
+ if on_holder_tx {
+ // Outbound dust balance: 6399 sats
+ let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
+ let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat), 1);
+ } else {
+ // Outbound dust balance: 5200 sats
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat), 1);
+ }
+ } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
+ // A 10x feerate bump would push existing (previously non-dust) HTLCs under the dust
+ // threshold; the fee update must be withheld instead.
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000);
+ if let Err(_) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at update_fee-swallowed HTLC", ); }
+ {
+ let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock = *feerate_lock * 10;
+ }
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1);
+ }
+
+ // Drain leftover events/monitors so this helper can be re-run with other parameters.
+ let _ = nodes[0].node.get_and_clear_pending_msg_events();
+ let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
+ added_monitors.clear();
+}
+
+#[test]
+fn test_max_dust_htlc_exposure() {
+ // Exercise every (dust_outbound_balance, breach event, holder-vs-counterparty tx)
+ // combination of the dust-exposure limit check.
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
+}