use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{Channel, ChannelError};
+use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
-use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
-use crate::ln::features::{ChannelFeatures, NodeFeatures};
+use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route};
+use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::errors::APIError;
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::string::UntrustedString;
-use crate::util::config::UserConfig;
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::script::{Builder, Script};
use crate::ln::functional_test_utils::*;
use crate::ln::chan_utils::CommitmentTransaction;
+use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
+
#[test]
fn test_insane_channel_opens() {
// Stand up a network of 2 nodes
// Instantiate channel parameters where we push the maximum msats given our
// funding satoshis
let channel_value_sat = 31337; // same as funding satoshis
- let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
+ let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
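A minimal standalone sketch of the reserve-then-push computation above, assuming LDK's default 1% proportional reserve with a 1,000 sat floor (the constants here are assumptions written out, not imports from the crate):

```rust
// Hypothetical re-derivation of the values used in the test above.
fn holder_selected_reserve_sats(channel_value_sat: u64, reserve_ppm: u64, floor_sat: u64) -> u64 {
    let proportional = channel_value_sat.saturating_mul(reserve_ppm) / 1_000_000;
    proportional.max(floor_sat).min(channel_value_sat)
}

fn main() {
    let channel_value_sat = 31_337;
    // 1% of 31_337 is 313, so the 1_000 sat floor applies.
    let reserve = holder_selected_reserve_sats(channel_value_sat, 10_000, 1_000);
    assert_eq!(reserve, 1_000);
    // The most the funder can push while still honoring its own reserve:
    let push_msat = (channel_value_sat - reserve) * 1000;
    assert_eq!(push_msat, 30_337_000);
}
```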
// Have node0 initiate a channel to node1 with aforementioned parameters
let mut push_amt = 100_000_000;
let feerate_per_kw = 253;
- let opt_anchors = false;
- push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+ let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+ push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
+ push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
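A worked instance of the push_amt headroom math above, assuming the non-anchor weight constants (724 WU base, 172 WU per HTLC) and the test's 253 sat/kW feerate; the four-HTLC headroom matches the "send four HTLCs" buffer referenced later in this diff:

```rust
fn main() {
    let feerate_per_kw: u64 = 253;
    let base_weight: u64 = 724;     // assumed commitment_tx_base_weight, static_remote_key
    let weight_per_htlc: u64 = 172; // assumed COMMITMENT_TX_WEIGHT_PER_HTLC
    let mut push_amt: u64 = 100_000_000;
    // Commitment fee with headroom for 4 HTLCs, floored to whole sats, in msat:
    push_amt -= feerate_per_kw * (base_weight + 4 * weight_per_htlc) / 1000 * 1000;
    assert_eq!(100_000_000 - push_amt, 357_000);
    // Counterparty-selected reserve on a 100k sat channel (1%), in msat:
    push_amt -= 1_000 * 1000;
    assert_eq!(push_amt, 98_643_000);
}
```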
let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
let mut sender_node_per_peer_lock;
let mut sender_node_peer_state_lock;
- let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
- chan.holder_selected_channel_reserve_satoshis = 0;
- chan.holder_max_htlc_value_in_flight_msat = 100_000_000;
+ if send_from_initiator {
+ let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+ chan.context.holder_selected_channel_reserve_satoshis = 0;
+ chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+ } else {
+ let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+ chan.context.holder_selected_channel_reserve_satoshis = 0;
+ chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+ }
}
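The `.context` hops that recur throughout this diff reflect the channel-state split: per-channel fields now live on a shared context struct embedded in each channel phase. A schematic of the relationship, reconstructed from the accesses in this diff (not the actual upstream definitions, which are generic over the signer):

```rust
// Schematic only: fields shown are the ones touched in this diff.
struct ChannelContext {
    holder_selected_channel_reserve_satoshis: u64,
    holder_max_htlc_value_in_flight_msat: u64,
    holder_dust_limit_satoshis: u64,
    // ... remaining per-channel state
}

struct OutboundV1Channel { context: ChannelContext /* plus unfunded state */ }
struct InboundV1Channel { context: ChannelContext /* plus unfunded state */ }
struct Channel { context: ChannelContext /* plus funded state */ }
```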
let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
// Note that for outbound channels we have to consider the commitment tx fee and the
// "fee spike buffer", which is currently a multiple of the total commitment tx fee as
// well as an additional HTLC.
- - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, opt_anchors));
+ - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
} else {
send_payment(&nodes[1], &[&nodes[0]], push_amt);
}
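The "fee spike buffer" subtracted above is the commitment fee recomputed with one extra phantom HTLC (hence the `2` for one real HTLC plus one phantom) and multiplied by FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, which is 2 at the time of this diff. A worked instance under the assumed non-anchor weights:

```rust
fn main() {
    let feerate_per_kw: u64 = 253;
    let (base_weight, per_htlc): (u64, u64) = (724, 172); // assumed non-anchor weights
    let commit_tx_fee_msat =
        |n_htlcs: u64| (base_weight + n_htlcs * per_htlc) * feerate_per_kw / 1000 * 1000;
    // One real HTLC plus one phantom HTLC, doubled for the spike buffer:
    let buffer_msat = 2 * commit_tx_fee_msat(1 + 1);
    assert_eq!(buffer_msat, 540_000);
}
```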
let channel_id = chan.2;
let secp_ctx = Secp256k1::new();
let default_config = UserConfig::default();
- let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
+ let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
- let opt_anchors = false;
+ let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
// Calculate the maximum feerate that A can afford. Note that the holder requires that an
// update_fee still be affordable with CONCURRENT_INBOUND_HTLC_FEE_BUFFER more HTLCs in
// flight before actually running out of local balance, while the counterparty does not, so
// we calculate two different feerates here - the expected local limit as well as the
// expected remote limit.
- let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(opt_anchors) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
- let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(opt_anchors)) as u32;
+ let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
+ let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
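A worked instance of the two limits, using hypothetical figures (channel value, push amount, and reserve chosen for illustration; the 724 WU base, 172 WU/HTLC, and 33-HTLC CONCURRENT_INBOUND_HTLC_FEE_BUFFER are assumed constants):

```rust
fn main() {
    let (channel_value, reserve_sats, push_sats): (u64, u64, u64) = (5_000, 1_000, 700);
    let (base_weight, per_htlc, htlc_buffer): (u64, u64, u64) = (724, 172, 33);
    let spendable_msat = (channel_value - reserve_sats - push_sats) * 1000;
    // Local limit: must still afford the fee with 33 more inbound HTLCs in flight.
    let local_limit = spendable_msat / (base_weight + htlc_buffer * per_htlc);
    // Remote limit: the counterparty only checks the bare commitment weight.
    let remote_limit = spendable_msat / base_weight;
    assert_eq!((local_limit, remote_limit), (515, 4_558));
}
```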
{
let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
*feerate_lock = feerate;
// We made sure neither party's funds are below the dust limit and there are no HTLCs here
assert_eq!(commitment_tx.output.len(), 2);
- let total_fee: u64 = commit_tx_fee_msat(feerate, 0, opt_anchors) / 1000;
+ let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
actual_fee = channel_value - actual_fee;
assert_eq!(total_fee, actual_fee);
let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
INITIAL_COMMITMENT_NUMBER - 1,
push_sats,
- channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, opt_anchors) / 1000,
- opt_anchors, local_funding, remote_funding,
+ channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
+ local_funding, remote_funding,
commit_tx_keys.clone(),
non_buffer_feerate + 4,
&mut htlcs,
- &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+ &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
);
local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
};
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
+ [nodes[0].node.get_our_node_id()], channel_value);
}
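Every `check_closed_event!` call in this diff gains two trailing arguments: the expected counterparty node id(s) and the channel capacity carried by the new `ChannelClosed` event fields. A plausible reconstruction of the extended macro, inferred purely from these call sites (the real definition lives in `functional_test_utils.rs` and may differ in detail):

```rust
macro_rules! check_closed_event {
    ($node: expr, $events: expr, $reason: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => {
        check_closed_event!($node, $events, $reason, false, $counterparty_node_ids, $channel_capacity)
    };
    ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr,
     $counterparty_node_ids: expr, $channel_capacity: expr) => {
        // Delegates to a helper that asserts one ChannelClosed event per counterparty
        // id, each carrying the expected reason and channel capacity.
        check_closed_event(&$node, $events, $reason, $is_check_discard_funding,
            &$counterparty_node_ids, $channel_capacity)
    };
}
```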
#[test]
send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
mine_transaction(&nodes[0], &remote_txn[0]);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
let channel_reserve = chan_stat.channel_reserve_msat;
// The 2* and +1 are for the fee spike reserve.
- let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2));
+ let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
let (mut route, our_payment_hash, _, our_payment_secret) =
get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
payment_hash: payment_hash,
cltv_expiry: htlc_cltv,
onion_routing_packet: onion_packet,
+ skimmed_fee_msat: None,
};
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
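The `skimmed_fee_msat: None` lines added across this diff exist because `msgs::UpdateAddHTLC` grew a new optional field (used when an intermediary skims fees off a forwarded HTLC), so every hand-rolled message must now initialize it. A sketch of the message as these tests build it; the field list is inferred from the diff and the `channel_id`/`htlc_id` values are illustrative:

```rust
let msg = msgs::UpdateAddHTLC {
    channel_id: chan.2,
    htlc_id: 0,
    amount_msat: max_can_send,
    payment_hash: our_payment_hash,
    cltv_expiry: htlc_cltv,
    onion_routing_packet: onion_packet,
    skimmed_fee_msat: None, // new field: Some(_) only when a fee is skimmed off this HTLC
};
```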
commitment_number,
95000,
local_chan_balance,
- local_chan.opt_anchors(), local_funding, remote_funding,
+ local_funding, remote_funding,
commit_tx_keys.clone(),
feerate_per_kw,
&mut vec![(accepted_htlc_info, ())],
- &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+ &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
);
local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
};
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let default_config = UserConfig::default();
- let opt_anchors = false;
+ let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
let mut push_amt = 100_000_000;
- push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+ push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let default_config = UserConfig::default();
- let opt_anchors = false;
+ let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
// channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
// transaction fee with 0 HTLCs (183 sats)).
let mut push_amt = 100_000_000;
- push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
+ push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
// Send four HTLCs to cover the initial push_msat buffer we're required to include
payment_hash: payment_hash,
cltv_expiry: htlc_cltv,
onion_routing_packet: onion_packet,
+ skimmed_fee_msat: None,
};
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
+ [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let default_config = UserConfig::default();
- let opt_anchors = false;
+ let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
// channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
// transaction fee with 0 HTLCs (183 sats)).
let mut push_amt = 100_000_000;
- push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
+ push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
- + feerate_per_kw as u64 * htlc_success_tx_weight(opt_anchors) / 1000 * 1000 - 1;
+ + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
// commitment transaction fee.
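A worked instance of the received-HTLC dust threshold computed above, assuming the non-anchor constants (354 sat MIN_CHAN_DUST_LIMIT_SATOSHIS, 703 WU HTLC-success weight) and the tests' default 253 sat/kW feerate:

```rust
fn main() {
    let feerate_per_kw: u64 = 253;
    let min_dust_limit_msat: u64 = 354 * 1000;
    // HTLC-success tx fee, floored to whole sats, in msat:
    let success_fee_msat = feerate_per_kw * 703 / 1000 * 1000;
    let dust_amt = min_dust_limit_msat + success_fee_msat - 1;
    // Anything at or below this adds no output (and no weight) to the commitment tx.
    assert_eq!(dust_amt, 530_999);
}
```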
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let default_config = UserConfig::default();
- let opt_anchors = false;
+ let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
// Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
// HTLC.
let mut push_amt = 100_000_000;
- push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+ push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
// During open, we don't have a "counterparty channel reserve" to check against, so that
// requirement only comes into play on the open_channel handling side.
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+ push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
open_channel_msg.push_msat += 1;
let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
- let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+ let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
// Add a 2* and +1 for the fee spike reserve.
- let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
+ let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
// Attempt to trigger a channel reserve violation --> payment failure.
- let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors);
+ let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
let mut route_2 = route_1.clone();
payment_hash: our_payment_hash_1,
cltv_expiry: htlc_cltv,
onion_routing_packet: onion_packet,
+ skimmed_fee_msat: None,
};
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(channels0.len(), 1);
assert_eq!(channels1.len(), 1);
- let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
+ let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
}
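The capacity assertions above decompose as follows, assuming the 1,000 sat (1%) reserve on a 100k sat channel with 95,000,000 msat pushed to node 1:

```rust
fn main() {
    let channel_value_msat: u64 = 100_000 * 1000;
    let (balance_1_msat, reserve_msat): (u64, u64) = (95_000_000, 1_000 * 1000);
    // Node 1's spendable balance, i.e. node 0's inbound capacity:
    assert_eq!(balance_1_msat - reserve_msat, 94_000_000);
    // Node 0's remaining balance after its own reserve, i.e. node 1's inbound capacity:
    assert_eq!(channel_value_msat - balance_1_msat - reserve_msat, 4_000_000);
}
```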
-fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, opt_anchors: bool) -> u64 {
- (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
+fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
+ (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
}
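Note the `/ 1000 * 1000` in the helper: it floors the weight-times-feerate product to a whole number of satoshis before re-expressing it in msat. A standalone demonstration with the weights passed in as parameters (the 724/172 values are assumed non-anchor constants):

```rust
fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, base_weight: u64, per_htlc: u64) -> u64 {
    (base_weight + num_htlcs * per_htlc) * feerate as u64 / 1000 * 1000
}

fn main() {
    // 253 sat/kW over 724 + 2 * 172 = 1068 weight units:
    // 1068 * 253 = 270_204, floored to 270 sats = 270_000 msat.
    assert_eq!(commit_tx_fee_msat(253, 2, 724, 172), 270_000);
}
```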
#[test]
let feemsat = 239; // set above
let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
- let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_1.2);
+ let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
// 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
// Also, ensure that each payment has enough to be over the dust limit to
// ensure it'll be included in each commit tx fee calculation.
- let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
+ let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
break;
// the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
// is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
// policy.
- let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
+ let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
let amt_msat_1 = recv_value_1 + total_fee_msat;
}
// split the rest to test holding cell
- let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
+ let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
- let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, opt_anchors);
+ let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
- let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+ let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
check_closed_broadcast!(nodes[0], true);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
// One pending HTLC is discarded by the force-close:
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
check_closed_broadcast!(nodes[2], true);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
macro_rules! claim_funds {
($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
check_closed_broadcast!(nodes[3], true);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
- check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
- check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
// confusing us in the following tests.
assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
ChannelMonitorUpdateStatus::Completed);
- check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
- check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
}
#[test]
node_txn.swap_remove(0);
}
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
// Verify broadcast of revoked HTLC-timeout
let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
// Broadcast revoked HTLC-timeout on node 1
mine_transaction(&nodes[1], &node_txn[1]);
test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
check_added_monitors!(nodes[1], 1);
mine_transaction(&nodes[0], &node_txn[1]);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
}
get_announce_close_broadcast_events(&nodes, 0, 1);
// Inform nodes[1] that nodes[0] broadcast a stale tx
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
mine_transaction(&nodes[0], &revoked_local_txn[0]);
get_announce_close_broadcast_events(&nodes, 0, 1);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
{
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
check_added_monitors!(nodes[0], 1);
confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let mut events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
match events.last().unwrap() {
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
assert_eq!(node_txn.len(), 2);
check_spends!(node_txn[0], commitment_tx[0]);
mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
let commitment_spend =
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 0);
// Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
// Verify that B's ChannelManager is able to detect that the HTLC timed out via its own tx
// and fail it backward in consequence
mine_transaction(&nodes[1], &commitment_tx[0]);
- check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false);
+ check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+ [nodes[2].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
let timeout_tx = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], commitment_tx[0]);
let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
// The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
// well, so HTLCs at exactly the dust limit will not be included in commitment txn.
nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000
+ .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
} else { 3000000 };
let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
payment_hash,
cltv_expiry,
onion_routing_packet,
+ skimmed_fee_msat: None,
};
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
}
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 3);
connect_block(&nodes[1], &block);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
// Duplicate the connect_block call since this may happen due to other listeners
// registering new transactions
nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_htlc_claims.0 = 1;
+ reconnect_nodes(reconnect_args);
expect_payment_path_successful!(nodes[0]);
}
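The `reconnect_nodes` rewrites in this diff replace seven positional tuple arguments with a builder-style struct, so each call only names the expectations it actually changes. An approximate shape of the new helper, reconstructed from the call sites here (field names come from the calls; defaults and lifetimes are assumptions):

```rust
pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
    pub node_a: &'a Node<'b, 'c, 'd>,
    pub node_b: &'a Node<'b, 'c, 'd>,
    pub send_channel_ready: (bool, bool),
    // -1 appears at call sites to signal a commitment_signed resend with no
    // accompanying update_add (inferred from the "still wants its RAA +
    // commitment_signed" branches below), hence the signed type.
    pub pending_htlc_adds: (i64, i64),
    pub pending_htlc_claims: (usize, usize),
    pub pending_htlc_fails: (usize, usize),
    pub pending_cell_htlc_claims: (usize, usize),
    pub pending_cell_htlc_fails: (usize, usize),
    pub pending_raa: (bool, bool),
}

impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
    pub fn new(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> Self {
        Self {
            node_a, node_b,
            send_channel_ready: (false, false),
            pending_htlc_adds: (0, 0),
            pending_htlc_claims: (0, 0),
            pending_htlc_fails: (0, 0),
            pending_cell_htlc_claims: (0, 0),
            pending_cell_htlc_fails: (0, 0),
            pending_raa: (false, false),
        }
    }
}
```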
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
- check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ check_closed_event!(&nodes[0], 1, ClosureReason::DisconnectedPeer, false,
+ [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
+ [nodes[0].node.get_our_node_id()], 1000000);
}
#[test]
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.send_channel_ready = (true, true);
+ reconnect_nodes(reconnect_args);
let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_cell_htlc_fails.0 = 1;
+ reconnect_args.pending_cell_htlc_claims.0 = 1;
+ reconnect_nodes(reconnect_args);
{
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 4);
}
// Even if the channel_ready messages get exchanged, as long as nothing further was
// received on either side, both sides will need to resend them.
- reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.send_channel_ready = (true, true);
+ reconnect_args.pending_htlc_adds.1 = 1;
+ reconnect_nodes(reconnect_args);
} else if messages_delivered == 3 {
// nodes[0] still wants its RAA + commitment_signed
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_htlc_adds.0 = -1;
+ reconnect_args.pending_raa.0 = true;
+ reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[0] still wants its commitment_signed
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_htlc_adds.0 = -1;
+ reconnect_nodes(reconnect_args);
} else if messages_delivered == 5 {
// nodes[1] still wants its final RAA
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_raa.1 = true;
+ reconnect_nodes(reconnect_args);
} else if messages_delivered == 6 {
// Everything was delivered...
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
}
let events_1 = nodes[1].node.get_and_clear_pending_events();
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
nodes[1].node.process_pending_htlc_forwards();
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
if messages_delivered < 2 {
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_htlc_claims.0 = 1;
+ reconnect_nodes(reconnect_args);
if messages_delivered < 1 {
expect_payment_sent!(nodes[0], payment_preimage_1);
} else {
}
} else if messages_delivered == 2 {
// nodes[0] still wants its RAA + commitment_signed
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_htlc_adds.1 = -1;
+ reconnect_args.pending_raa.1 = true;
+ reconnect_nodes(reconnect_args);
} else if messages_delivered == 3 {
// nodes[0] still wants its commitment_signed
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_htlc_adds.1 = -1;
+ reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[1] still wants its final RAA
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+ reconnect_args.pending_raa.0 = true;
+ reconnect_nodes(reconnect_args);
} else if messages_delivered == 5 {
// Everything was delivered...
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
}
if messages_delivered == 1 || messages_delivered == 2 {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
}
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
if messages_delivered > 2 {
expect_payment_path_successful!(nodes[0]);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
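The `peer_connected` rewrites above exist because `msgs::Init` gained a `networks` field, an optional list of chain hashes the peer supports; these tests pass `None` to advertise no explicit list. A sketch of the handshake message as built here, with the `Option<Vec<_>>` type inferred from `networks: None`:

```rust
let init_msg = msgs::Init {
    features: nodes[1].node.init_features(),
    // e.g. Some(vec![ChainHash::using_genesis_block(Network::Bitcoin)]) to pin a network
    networks: None,
    remote_network_address: None,
};
```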
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
mine_transaction(&nodes[1], &node_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
mine_transaction(&nodes[1], &node_txn[0]);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
mine_transaction(&nodes[1], &node_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
mine_transaction(&nodes[1], &node_txn[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[1], our_payment_hash, false);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(revoked_htlc_txn.len(), 1);
connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
assert_eq!(c_txn.len(), 1);
// Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
mine_transaction(&nodes[1], &commitment_tx[0]);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
// ChannelMonitor: HTLC-Success tx
assert_eq!(b_txn.len(), 1);
mine_transaction(&nodes[1], &commitment_txn[0]);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
let htlc_timeout_tx;
mine_transaction(&nodes[2], &commitment_txn[0]);
check_added_monitors!(nodes[2], 2);
- check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let events = nodes[2].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
mine_transaction(&nodes[1], &local_txn[0]);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let events = nodes[1].node.get_and_clear_pending_msg_events();
match events[0] {
MessageSendEvent::UpdateHTLCs { .. } => {},
assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
// 0th HTLC:
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
mine_transaction(&nodes[0], &local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
let htlc_timeout = {
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
let htlc_timeout = {
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
mine_transaction(&nodes[0], &closing_tx);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
check_spends!(spend_txn[0], closing_tx);
mine_transaction(&nodes[1], &closing_tx);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
- let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+ let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
- let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+ let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
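+// For context, a sketch of the fee math assumed above (mirroring the weight-based
+// commitment fee calculation; illustrative, with rounding down to whole sats):
+//   commit_tx_fee_msat(feerate, num_htlcs, features)
+//     = (commitment_tx_base_weight(features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC)
+//       * feerate / 1000 * 1000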
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
// Send a payment which passes reserve checks but gets stuck in the holding cell.
let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
- let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+ let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
// The 2* and +1 HTLCs in the commit tx fee calculation account for the fee spike reserve.
let amt_1 = 20000;
- let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors) - amt_1;
+ let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
- let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2);
+ let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
// Send a payment which passes reserve checks but gets stuck in the holding cell.
- let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+ let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
let payment_event = {
nodes[0].node.send_payment_with_route(&route, our_payment_hash,
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
check_closed_broadcast!(nodes[1], true).unwrap();
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
// Fetch a route in advance as we will be unable to once we're unable to send.
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
- htlc_minimum_msat = channel.get_holder_htlc_minimum_msat();
+ htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
}
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
let channel_reserve = chan_stat.channel_reserve_msat;
let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
- let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+ let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
// The 2* and +1 are for the fee spike reserve.
- let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+ let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
payment_hash: our_payment_hash,
cltv_expiry: htlc_cltv,
onion_routing_packet: onion_packet.clone(),
+ skimmed_fee_msat: None,
};
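+// (`skimmed_fee_msat` is the newly-added optional `update_add_htlc` field recording any
+// fee skimmed off the forwarded amount; these tests leave it unset.)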
for i in 0..50 {
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
// Disconnect and Reconnect
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
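+// (`networks` is the newly-added optional list of chain hashes in the `init` message;
+// `None` here is assumed to simply omit the field, leaving supported networks
+// unconstrained in these tests.)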
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 1);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 1);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
}
#[test]
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
// We route 2 dust-HTLCs between A and B
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
if local {
// We fail dust-HTLC 1 by broadcast of local commitment tx
mine_transaction(&nodes[0], &as_commitment_tx[0]);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[0], dust_hash, false);
mine_transaction(&nodes[0], &bs_commitment_tx[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
- if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+ // We test that config.our_to_self_delay > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
+ if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
&low_our_to_self_config, 0, 42)
{
}
} else { assert!(false) }
- // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
+ // We test that config.our_to_self_delay > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+ if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
&low_our_to_self_config, 0, &nodes[0].logger, 42)
{
_ => { panic!(); }
}
} else { panic!(); }
- check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
- // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
+ // We test that msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+ if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
&high_their_to_self_config, 0, &nodes[0].logger, 42)
{
}
}
// Reconnect peers
- nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
assert_eq!(reestablish_1.len(), 3);
- nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
assert_eq!(reestablish_2.len(), 3);
connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
let revoked_htlc_txn = {
});
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
+ , [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
let penalty_txn = {
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice txn * 3
connect_blocks(&nodes[1], 1);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut);
+ check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(close_ev.len(), 1);
match close_ev[0] {
}
_ => panic!("Unexpected event"),
}
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let accept_chan_msg = {
let mut node_1_per_peer_lock;
let mut node_1_peer_state_lock;
- let channel = get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
+ let channel = get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
channel.get_accept_channel_message()
};
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
_ => panic!("Unexpected event"),
}
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
// Edit amt_to_forward to simulate the sender having set
// the final amount and the routing node taking less fee
- onion_payloads[1].amt_to_forward = 99_000;
+ if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
+ *amt_msat = 99_000;
+ } else { panic!() }
let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
payment_event.msgs[0].onion_routing_packet = new_onion_packet;
}
}
}
-#[test]
-#[allow(deprecated)]
-fn test_secret_timeout() {
- // Simple test of payment secret storage time outs. After
- // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well.
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
-
- let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap();
-
- // We should fail to register the same payment hash twice, at least until we've connected a
- // block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
- if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
- assert_eq!(err, "Duplicate payment hash");
- } else { panic!(); }
- let mut block = {
- let node_1_blocks = nodes[1].blocks.lock().unwrap();
- create_dummy_block(node_1_blocks.last().unwrap().0.block_hash(), node_1_blocks.len() as u32 + 7200, Vec::new())
- };
- connect_block(&nodes[1], &block);
- if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
- assert_eq!(err, "Duplicate payment hash");
- } else { panic!(); }
-
- // If we then connect the second block, we should be able to register the same payment hash
- // again (this time getting a new payment secret).
- block.header.prev_blockhash = block.header.block_hash();
- block.header.time += 1;
- connect_block(&nodes[1], &block);
- let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap();
- assert_ne!(payment_secret_1, our_payment_secret);
-
- {
- let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
- nodes[0].node.send_payment_with_route(&route, payment_hash,
- RecipientOnionFields::secret_only(our_payment_secret), PaymentId(payment_hash.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- let mut payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
- commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- }
- // Note that after leaving the above scope we have no knowledge of any arguments or return
- // values from previous calls.
- expect_pending_htlcs_forwardable!(nodes[1]);
- let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentClaimable { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => {
- assert!(payment_preimage.is_none());
- assert_eq!(payment_secret, our_payment_secret);
- // We don't actually have the payment preimage with which to claim this payment!
- },
- _ => panic!("Unexpected event"),
- }
-}
-
#[test]
fn test_bad_secret_hash() {
// Simple test of unregistered payment hash/invalid payment secret handling
let height = HTLC_TIMEOUT_BROADCAST + 1;
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
+ check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+ &[nodes[1].node.get_our_node_id()], 100000);
watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
check_added_monitors(&nodes[0], 1);
{
let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
- check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
+ check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
+ [nodes[1].node.get_our_node_id(); 2], 100000);
}
#[test]
chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], TEST_FINAL_CLTV);
let htlc_timeout = {
nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
- check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
if go_onchain_before_fulfill {
let txn_to_broadcast = match broadcast_alice {
true => alice_txn.clone(),
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
}
}
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
}
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
if broadcast_alice {
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
- let funding_created = {
+ let (_, funding_created) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
+ // Once we call `get_funding_created` the channel has the same channel_id as
// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
// try to create another channel. Instead, we drop the channel entirely here (leaving the
// channelmanager in a possibly nonsense state).
- let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
+ let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
let logger = test_utils::TestLogger::new();
- as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
+ as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
};
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], false);
- check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+ [nodes[1].node.get_our_node_id()], 100000);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
check_added_monitors!(nodes[0], 2);
- check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+ check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+ [nodes[1].node.get_our_node_id(); 2], 100000);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
match events[0] {
let expected_err = "funding tx had wrong script/value or output index";
confirm_transaction_at(&nodes[1], &tx, 1);
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
+ [nodes[0].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true);
}
-#[test]
-fn test_keysend_payments_to_public_node() {
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
- let network_graph = nodes[0].network_graph.clone();
- let payer_pubkey = nodes[0].node.get_our_node_id();
- let payee_pubkey = nodes[1].node.get_our_node_id();
- let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
- final_value_msat: 10000,
- };
- let scorer = test_utils::TestScorer::new();
- let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
- let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
-
- let test_preimage = PaymentPreimage([42; 32]);
- let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
- RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let event = events.pop().unwrap();
- let path = vec![&nodes[1]];
- pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
- claim_payment(&nodes[0], &path, test_preimage);
-}
-
-#[test]
-fn test_keysend_payments_to_private_node() {
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- let payer_pubkey = nodes[0].node.get_our_node_id();
- let payee_pubkey = nodes[1].node.get_our_node_id();
-
- let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
- let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
- final_value_msat: 10000,
- };
- let network_graph = nodes[0].network_graph.clone();
- let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::new();
- let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
- let route = find_route(
- &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &(), &random_seed_bytes
- ).unwrap();
-
- let test_preimage = PaymentPreimage([42; 32]);
- let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
- RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let event = events.pop().unwrap();
- let path = vec![&nodes[1]];
- pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
- claim_payment(&nodes[0], &path, test_preimage);
-}
-
#[test]
fn test_double_partial_claim() {
// Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
AtUpdateFeeOutbound,
}
-fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) {
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
// Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat`
// policy.
//
let chanmon_cfgs = create_chanmon_cfgs(2);
let mut config = test_default_channel_config();
- config.channel_config.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+ config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
+ // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
+ // to get roughly the same initial value as the default setting when this test was
+ // originally written.
+ MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
+ } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
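+// Sanity-check the intended equivalence at the 253 sat/kw test rate (a sketch; integer
+// division makes the multiplier-based limit land just below the old fixed one):
+assert_eq!((5_000_000u64 / 253) * 253, 4_999_786);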
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
- let opt_anchors = false;
+ let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
if on_holder_tx {
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
- chan.holder_dust_limit_satoshis = 546;
+ let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
+ chan.context.holder_dust_limit_satoshis = 546;
}
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
let (mut route, payment_hash, _, payment_secret) =
get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
- let dust_buffer_feerate = {
+ let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
- chan.get_dust_buffer_feerate(None) as u64
+ (chan.context.get_dust_buffer_feerate(None) as u64,
+ chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
};
- let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
- let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
+ let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+ let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
- let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
- let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+ let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+ let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
let dust_htlc_on_counterparty_tx: u64 = 4;
- let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+ let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
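+// e.g. under the fixed 5_000_000 msat limit, each counterparty-tx dust HTLC comes out at
+// 1_250_000 msat, so `dust_htlc_on_counterparty_tx` (4) of them exactly saturate the budget.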
if on_holder_tx {
if dust_outbound_balance {
), true, APIError::ChannelUnavailable { .. }, {});
}
} else if exposure_breach_event == ExposureEvent::AtHTLCReception {
- let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 });
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
nodes[1].node.send_payment_with_route(&route, payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[1], 1);
// Outbound dust balance: 6399 sats
let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
- nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1);
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
} else {
// Outbound dust balance: 5200 sats
nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 1,
- config.channel_config.max_dust_htlc_exposure_msat), 1);
+ dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
+ max_dust_htlc_exposure_msat), 1);
}
} else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
- nodes[0].node.send_payment_with_route(&route, payment_hash,
- RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ // Because the multiplier-based dust exposure limit scales with the feerate, we need
+ // to add many HTLCs that will become dust at the new feerate in order to cross the
+ // threshold.
+ for _ in 0..20 {
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ }
{
let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
*feerate_lock = *feerate_lock * 10;
added_monitors.clear();
}
+fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+ do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+}
+
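+// `do_test_max_dust_htlc_exposure_by_threshold_type` runs the full matrix for one
+// threshold type: every combination of dust-balance direction, breach event, and
+// holder-vs-counterparty commitment tx, so the test below only flips `multiplier_dust_limit`.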
#[test]
fn test_max_dust_htlc_exposure() {
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true);
- do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
- do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
+ do_test_max_dust_htlc_exposure_by_threshold_type(false);
+ do_test_max_dust_htlc_exposure_by_threshold_type(true);
}
#[test]
MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
- err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
+ err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() },
+ [nodes[0].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
},
do_payment_with_custom_min_final_cltv_expiry(true, false);
do_payment_with_custom_min_final_cltv_expiry(true, true);
}
+
+#[test]
+fn test_disconnects_peer_awaiting_response_ticks() {
+ // Tests that nodes which are awaiting a response critical for channel responsiveness
+ // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Asserts a disconnect event is queued to the user.
+ let check_disconnect_event = |node: &Node, should_disconnect: bool| {
+ let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
+ if let MessageSendEvent::HandleError { action, .. } = event {
+ if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
+ Some(())
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ );
+ assert_eq!(disconnect_event.is_some(), should_disconnect);
+ };
+
+ // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
+ // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let check_disconnect = |node: &Node| {
+ // No disconnect without any timer ticks.
+ check_disconnect_event(node, false);
+
+ // No disconnect with 1 timer tick less than required.
+ for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, false);
+ }
+
+ // Disconnect after reaching the required ticks.
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, true);
+
+ // Disconnect again on the next tick if the peer hasn't been disconnected yet.
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, true);
+ };
+
+ create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+ // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
+ *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(&nodes[0], 1);
+ let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
+ check_added_monitors!(&nodes[1], 1);
+
+ // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
+ let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
+ check_added_monitors!(&nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
+ check_added_monitors(&nodes[0], 1);
+
+ // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
+ // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
+ // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ check_disconnect(&nodes[1]);
+
+ // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
+ //
+ // Note that since the commitment dance didn't complete above, Alice is expected to resend her
+ // final `RevokeAndACK` to Bob to complete it.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ let bob_init = msgs::Init {
+ features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+ };
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
+ let alice_init = msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ };
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
+
+ // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
+ // received Bob's yet, so she should disconnect him after reaching
+ // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let alice_channel_reestablish = get_event_msg!(
+ nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
+ );
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
+ check_disconnect(&nodes[0]);
+
+ // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
+ let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
+ if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ Some(msg.clone())
+ } else {
+ None
+ }
+ ).unwrap();
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
+
+ // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
+ for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+ nodes[0].node.timer_tick_occurred();
+ check_disconnect_event(&nodes[0], false);
+ }
+
+ // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
+ // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ check_disconnect(&nodes[1]);
+
+ // Finally, have Bob process the last message.
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
+ check_added_monitors(&nodes[1], 1);
+
+ // At this point, neither node should attempt to disconnect each other, since they aren't
+ // waiting on any messages.
+ for node in &nodes {
+ for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, false);
+ }
+ }
+}
+
+#[test]
+fn test_remove_expired_outbound_unfunded_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+ let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::FundingGenerationReady { .. } => (),
+ _ => panic!("Unexpected event"),
+ };
+
+ // Asserts the outbound channel has been removed from nodes[0]'s peer state map.
+ let check_outbound_channel_existence = |should_exist: bool| {
+ let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+ assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ };
+
+ // Channel should exist without any timer ticks.
+ check_outbound_channel_existence(true);
+
+ // Channel should exist with 1 timer tick less than required.
+ for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+ nodes[0].node.timer_tick_occurred();
+ check_outbound_channel_existence(true);
+ }
+
+ // Remove channel after reaching the required ticks.
+ nodes[0].node.timer_tick_occurred();
+ check_outbound_channel_existence(false);
+
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+}
+
+#[test]
+fn test_remove_expired_inbound_unfunded_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+ let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+ let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::FundingGenerationReady { .. } => (),
+ _ => panic!("Unexpected event"),
+ };
+
+ // Asserts the inbound channel has been removed from nodes[1]'s peer state map.
+ let check_inbound_channel_existence = |should_exist: bool| {
+ let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+ let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
+ assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ };
+
+ // Channel should exist without any timer ticks.
+ check_inbound_channel_existence(true);
+
+ // Channel should exist with 1 timer tick less than required.
+ for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+ nodes[1].node.timer_tick_occurred();
+ check_inbound_channel_existence(true);
+ }
+
+ // Remove channel after reaching the required ticks.
+ nodes[1].node.timer_tick_occurred();
+ check_inbound_channel_existence(false);
+
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+}