X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=63b3b2bb0a49d9ba661f00050ddb7c2c031ed098;hb=4cee62233cad5cc80e29208e7e7f633324a4abaf;hp=f2b05b6e1ede4fa05d1fd8eee35679f82e0c88e6;hpb=8d50c919cfa3eee5eaf71d4b142ff5cbfddd4b56;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index f2b05b6e..63b3b2bb 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -17,16 +17,16 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; -use crate::chain::keysinterface::{ChannelSigner, EcdsaChannelSigner, EntropySource}; +use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; -use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; +use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; -use crate::ln::channel::{Channel, ChannelError}; +use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; -use crate::routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route}; +use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route}; use crate::ln::features::{ChannelFeatures, NodeFeatures}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; @@ -38,12 +38,11 @@ use crate::util::string::UntrustedString; use crate::util::config::UserConfig; use bitcoin::hash_types::BlockHash; -use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::blockdata::script::{Builder, Script}; use bitcoin::blockdata::opcodes; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; -use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxMerkleNode, TxOut, Witness}; +use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxOut, Witness}; use bitcoin::OutPoint as BitcoinOutPoint; use bitcoin::secp256k1::Secp256k1; @@ -76,7 +75,7 @@ fn test_insane_channel_opens() { // Instantiate channel parameters where we push the maximum msats given our // funding satoshis let channel_value_sat = 31337; // same as funding satoshis - let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); + let channel_reserve_satoshis = 
get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000; // Have node0 initiate a channel to node1 with aforementioned parameters @@ -158,7 +157,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let feerate_per_kw = 253; let opt_anchors = false; push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; - push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap(); let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); @@ -180,9 +179,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; let mut sender_node_per_peer_lock; let mut sender_node_peer_state_lock; - let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); - chan.holder_selected_channel_reserve_satoshis = 0; - chan.holder_max_htlc_value_in_flight_msat = 100_000_000; + if send_from_initiator { + let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); + chan.context.holder_selected_channel_reserve_satoshis = 0; + chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000; + } else { + let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); + chan.context.holder_selected_channel_reserve_satoshis = 0; + chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000; + } } let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); @@ -509,10 +514,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); if steps & 0b1000_0000 != 0{ - let block = Block { - header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }, - txdata: vec![], - }; + let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new()); connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } @@ -647,7 +649,7 @@ fn test_update_fee_that_funder_cannot_afford() { let channel_id = chan.2; let secp_ctx = Secp256k1::new(); let default_config = UserConfig::default(); - let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); + let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); let opt_anchors = false; @@ -732,7 +734,7 @@ fn test_update_fee_that_funder_cannot_afford() { commit_tx_keys.clone(), non_buffer_feerate + 4, &mut htlcs, - &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() + &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; @@ -1044,7 +1046,7 @@ fn fake_network_test() { }); hops[1].fee_msat = 
chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; + let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; let mut hops = Vec::with_capacity(3); hops.push(RouteHop { @@ -1073,7 +1075,7 @@ fn fake_network_test() { }); hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; + let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; // Claim the rebalances... fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2); @@ -1106,6 +1108,9 @@ fn holding_cell_htlc_counting() { create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + let mut payments = Vec::new(); for _ in 0..50 { let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); @@ -1123,14 +1128,11 @@ fn holding_cell_htlc_counting() { // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in // the holding cell waiting on B's RAA to send. At this point we should not be able to add // another HTLC. - let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); { unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1, RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1); } // This should also be true if we try to forward a payment. 
@@ -1284,7 +1286,7 @@ fn test_duplicate_htlc_different_direction_onchain() { mine_transaction(&nodes[0], &remote_txn[0]); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(claim_txn.len(), 3); @@ -1346,21 +1348,19 @@ fn test_basic_channel_reserve() { // The 2* and +1 are for the fee spike reserve. let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2)); let max_can_send = 5000000 - channel_reserve - commit_tx_fee; - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; let err = nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap(); match err { PaymentSendFailure::AllFailedResendSafe(ref fails) => { - match &fails[0] { - &APIError::ChannelUnavailable{ref err} => - assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)), - _ => panic!("Unexpected error variant"), - } + if let &APIError::ChannelUnavailable { .. } = &fails[0] {} + else { panic!("Unexpected error variant"); } }, _ => panic!("Unexpected error variant"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1); send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); } @@ -1373,7 +1373,9 @@ fn test_fee_spike_violation_fails_htlc() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 3460001); + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 3460000); + route.paths[0].hops[0].fee_msat += 1; // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); @@ -1383,7 +1385,7 @@ fn test_fee_spike_violation_fails_htlc() { let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, htlc_id: 0, @@ -1391,6 +1393,7 @@ fn test_fee_spike_violation_fails_htlc() { payment_hash: payment_hash, 
cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, + skimmed_fee_msat: None, }; nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); @@ -1457,11 +1460,11 @@ fn test_fee_spike_violation_fails_htlc() { commitment_number, 95000, local_chan_balance, - local_chan.opt_anchors(), local_funding, remote_funding, + local_chan.context.opt_anchors(), local_funding, remote_funding, commit_tx_keys.clone(), feerate_per_kw, &mut vec![(accepted_htlc_info, ())], - &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() + &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; @@ -1521,23 +1524,22 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { let mut push_amt = 100_000_000; push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); - push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); // Sending exactly enough to hit the reserve amount should be accepted for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); } // However one more HTLC should be significantly over the reserve amount and fail. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_string(), 1); } #[test] @@ -1555,7 +1557,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { // transaction fee with 0 HTLCs (183 sats)). 
let mut push_amt = 100_000_000; push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); - push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); // Send four HTLCs to cover the initial push_msat buffer we're required to include @@ -1563,7 +1565,9 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); } - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000); + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1000); + route.paths[0].hops[0].fee_msat = 700_000; // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, payment_hash: payment_hash, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, + skimmed_fee_msat: None, }; nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg); @@ -1609,7 +1614,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors); - push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt); let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 @@ -1625,11 +1630,12 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { } // One more than the dust amt should fail, however. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); + route.paths[0].hops[0].fee_msat += 1; unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); + ), true, APIError::ChannelUnavailable { .. 
}, {}); } #[test] @@ -1653,7 +1659,7 @@ fn test_chan_init_feerate_unaffordability() { // During open, we don't have a "counterparty channel reserve" to check against, so that // requirement only comes into play on the open_channel handling side. - push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); open_channel_msg.push_msat += 1; @@ -1736,7 +1742,8 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors); let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1; let amt_msat_2 = recv_value_2 + total_routing_fee_msat; - let (route_2, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_2); + let mut route_2 = route_1.clone(); + route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2; // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap(); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, htlc_id: 1, payment_hash: our_payment_hash_1, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, + skimmed_fee_msat: None, }; nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); @@ -1778,7 +1786,7 @@ fn test_inbound_outbound_capacity_is_not_zero() { assert_eq!(channels0.len(), 1); assert_eq!(channels1.len(), 1); - let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config); + let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config); assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000); assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000); @@ -1829,17 +1837,15 @@ fn test_channel_reserve_holding_cell_htlcs() { // attempt to send amt_msat > their_max_htlc_value_in_flight_msat { let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_features(nodes[2].node.invoice_features()).with_max_channel_saturation_power_of_half(0); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0, TEST_FINAL_CLTV); - route.paths[0].last_mut().unwrap().fee_msat += 1; - assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); + 
.with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; + assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1); } // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete @@ -1856,8 +1862,8 @@ fn test_channel_reserve_holding_cell_htlcs() { } let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_features(nodes[2].node.invoice_features()).with_max_channel_saturation_power_of_half(0); - let route = get_route!(nodes[0], payment_params, recv_value_0, TEST_FINAL_CLTV).unwrap(); + .with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); + let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap(); let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -1905,11 +1911,12 @@ fn test_channel_reserve_holding_cell_htlcs() { // channel reserve test with htlc pending output > 0 let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs; { - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_2 + 1); + let mut route = route_1.clone(); + route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; + let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -1934,13 +1941,13 @@ fn test_channel_reserve_holding_cell_htlcs() { // test with outbound holding cell amount > 0 { - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22+1); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2); } let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); @@ -2438,7 +2445,7 @@ fn test_justice_tx_htlc_timeout() { test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE); mine_transaction(&nodes[0], &revoked_local_txn[0]); - connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires // Verify broadcast of revoked HTLC-timeout let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); check_added_monitors!(nodes[0], 1); @@ -2763,9 +2770,8 @@ fn test_htlc_on_chain_success() { assert_eq!(node_txn[1].lock_time.0, 0); // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; - connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]}); - connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()])); + connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -2894,15 +2900,14 @@ fn test_htlc_on_chain_success() { assert_eq!(commitment_spend.input.len(), 2); assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert_eq!(commitment_spend.lock_time.0, nodes[1].best_block_info().1 + 1); + assert_eq!(commitment_spend.lock_time.0, nodes[1].best_block_info().1); assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment // We don't bother to check that B can claim the HTLC output on its commitment tx here as // we already checked the same situation with A. 
// Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent - let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; - connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] }); - connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires + connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()])); + connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); @@ -2991,9 +2996,9 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { if nodes[1].connect_style.borrow().skips_blocks() { assert_eq!(txn.len(), 1); } else { - assert_eq!(txn.len(), 2); // Extra rebroadcast of timeout transaction + assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction } - check_spends!(txn[0], commitment_tx[0]); + txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0])); assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); txn.remove(0) }; @@ -3024,7 +3029,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_spends!(commitment_tx[0], chan_1.3); mine_transaction(&nodes[0], &commitment_tx[0]); - connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -3126,7 +3131,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as // well, so HTLCs at exactly the dust limit will not be included in commitment txn. nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 + .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000 } else { 3000000 }; let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); @@ -3398,7 +3403,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() { let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads( &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); - let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); + let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); // Send a 0-msat update_add_htlc to fail the channel. 
let update_add_htlc = msgs::UpdateAddHTLC { @@ -3408,6 +3413,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() { payment_hash, cltv_expiry, onion_routing_packet, + skimmed_fee_msat: None, }; nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc); } @@ -3461,17 +3467,17 @@ fn test_htlc_ignore_latest_remote_commitment() { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 3); - assert_eq!(node_txn[0], node_txn[1]); + assert_eq!(node_txn[0].txid(), node_txn[1].txid()); - let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}); + let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]); + connect_block(&nodes[1], &block); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions - connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]}); + connect_block(&nodes[1], &block); } #[test] @@ -3618,8 +3624,8 @@ fn test_peer_disconnected_before_funding_broadcasted() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer); - check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer); + check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false); + check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false); } #[test] @@ -4031,10 +4037,14 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -4152,10 +4162,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { route_payment(&nodes[0], &[&nodes[1]], 100000).1 }; - let mut block = Block { - header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }, - txdata: vec![], - }; + let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new()); connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); let block_count 
= TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS; @@ -4263,7 +4270,7 @@ macro_rules! check_spendable_outputs { match event { Event::SpendableOutputs { mut outputs } => { for outp in outputs.drain(..) { - txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx).unwrap()); + txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap()); all_outputs.push(outp); } }, @@ -4271,7 +4278,7 @@ macro_rules! check_spendable_outputs { }; } if all_outputs.len() > 1 { - if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx) { + if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) { txn.push(tx); } } @@ -4446,7 +4453,7 @@ fn test_static_spendable_outputs_timeout_tx() { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("Unexpected event"), } - connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4524,7 +4531,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(revoked_htlc_txn.len(), 1); assert_ne!(revoked_htlc_txn[0].lock_time.0, 0); // HTLC-Timeout // B will generate justice tx from A's revoked commitment/HTLC tx - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }); + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); @@ -4605,8 +4611,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH // A will generate justice tx from B's revoked commitment/HTLC tx - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header, txdata: 
vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }); + connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); @@ -4699,8 +4704,7 @@ fn test_onchain_to_onchain_claim() { assert_eq!(c_txn[0].lock_time.0, 0); // Success tx // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; - connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), c_txn[0].clone()]}); + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()])); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -4756,7 +4760,7 @@ fn test_onchain_to_onchain_claim() { check_spends!(b_txn[0], commitment_tx[0]); assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment - assert_eq!(b_txn[0].lock_time.0, nodes[1].best_block_info().1 + 1); // Success tx + assert_eq!(b_txn[0].lock_time.0, nodes[1].best_block_info().1); // Success tx check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); @@ -4795,8 +4799,8 @@ fn test_duplicate_payment_hash_one_failure_one_success() { // script push size limit so that the below script length checks match // ACCEPTED_HTLC_SCRIPT_WEIGHT. 
let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40) - .with_features(nodes[3].node.invoice_features()); - let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000, TEST_FINAL_CLTV - 40); + .with_bolt11_features(nodes[3].node.invoice_features()).unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000); send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret); let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2); @@ -4807,7 +4811,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires let htlc_timeout_tx; { // Extract one of the two HTLC-Timeout transaction @@ -4996,7 +5000,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis; + .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis; // 0th HTLC: let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 1st HTLC: @@ -5287,7 +5291,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -5370,7 +5374,7 @@ fn test_key_derivation_params() { // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx mine_transaction(&nodes[0], &local_txn_1[0]); - connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); @@ -5457,10 +5461,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { check_added_monitors!(nodes[1], 1); let starting_block = nodes[1].best_block_info(); - let mut block = Block { - header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }, - txdata: vec![], - }; + let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 { connect_block(&nodes[1], &block); block.header.prev_blockhash = block.block_hash(); @@ -5490,11 +5491,11 @@ fn 
do_htlc_claim_current_remote_commitment_only(use_dust: bool) { // to "time out" the HTLC. let starting_block = nodes[1].best_block_info(); - let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; + let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 { - connect_block(&nodes[0], &Block { header, txdata: Vec::new()}); - header.prev_blockhash = header.block_hash(); + connect_block(&nodes[0], &block); + block.header.prev_blockhash = block.block_hash(); } test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); @@ -5536,10 +5537,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no } let starting_block = nodes[1].best_block_info(); - let mut block = Block { - header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }, - txdata: vec![], - }; + let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 { connect_block(&nodes[0], &block); block.header.prev_blockhash = block.block_hash(); @@ -5737,9 +5735,6 @@ fn test_fail_holding_cell_htlc_upon_free() { chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1); - let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", - hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2)); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1); // Check that the payment failed to be sent out. let events = nodes[0].node.get_and_clear_pending_events(); @@ -5749,7 +5744,7 @@ fn test_fail_holding_cell_htlc_upon_free() { assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap()); assert_eq!(our_payment_hash.clone(), *payment_hash); assert_eq!(*payment_failed_permanently, false); - assert_eq!(*short_channel_id, Some(route.paths[0][0].short_channel_id)); + assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id)); }, _ => panic!("Unexpected event"), } @@ -5828,9 +5823,6 @@ fn test_free_and_fail_holding_cell_htlcs() { chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1); - let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", - hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2)); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1); // Check that the second payment failed to be sent out. 
let events = nodes[0].node.get_and_clear_pending_events(); @@ -5840,7 +5832,7 @@ fn test_free_and_fail_holding_cell_htlcs() { assert_eq!(payment_id_2, *payment_id.as_ref().unwrap()); assert_eq!(payment_hash_2.clone(), *payment_hash); assert_eq!(*payment_failed_permanently, false); - assert_eq!(*short_channel_id, Some(route_2.paths[0][0].short_channel_id)); + assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id)); }, _ => panic!("Unexpected event"), } @@ -5896,10 +5888,10 @@ fn test_free_and_fail_holding_cell_htlcs() { fn test_fail_holding_cell_htlc_upon_free_multihop() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - // When this test was written, the default base fee floated based on the HTLC count. - // It is now fixed, so we simply set the fee to the expected value here. + // Avoid having to include routing fees in calculations let mut config = test_default_channel_config(); - config.channel_config.forwarding_fee_base_msat = 196; + config.channel_config.forwarding_fee_base_msat = 0; + config.channel_config.forwarding_fee_proportional_millionths = 0; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); @@ -5931,9 +5923,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2); // Send a payment which passes reserve checks but gets stuck in the holding cell. - let feemsat = 239; - let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors) - total_routing_fee_msat; + let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); let payment_event = { nodes[0].node.send_payment_with_route(&route, our_payment_hash, @@ -6037,14 +6027,12 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - route.paths[0][0].fee_msat = 100; + route.paths[0].hops[0].fee_msat = 100; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1); } #[test] @@ -6057,7 +6045,7 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - route.paths[0][0].fee_msat = 0; + route.paths[0].hops[0].fee_msat = 0; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err }, @@ -6101,9 +6089,9 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) - .with_features(nodes[1].node.invoice_features()); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000, 0); - route.paths[0].last_mut().unwrap().cltv_expiry_delta = 500000001; + .with_bolt11_features(nodes[1].node.invoice_features()).unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); + route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::InvalidRoute { ref err }, @@ -6121,8 +6109,10 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64; + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); for i in 0..max_accepted_htlcs { let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); let payment_event = { @@ -6146,14 +6136,11 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); } - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1); } #[test] @@ -6172,14 +6159,11 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight); // Manually create a route over our max in flight (which our router normally automatically // limits us to. - route.paths[0][0].fee_msat = max_in_flight + 1; + route.paths[0].hops[0].fee_msat = max_in_flight + 1; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); - + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1); send_payment(&nodes[0], &[&nodes[1]], max_in_flight); } @@ -6198,7 +6182,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); - htlc_minimum_msat = channel.get_holder_htlc_minimum_msat(); + htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat(); } let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); @@ -6261,13 +6245,16 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 3999999); + let send_amt = 3999999; + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000); + route.paths[0].hops[0].fee_msat = send_amt; let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1; let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap(); let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route.paths[0], 3999999, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash); + &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap(); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); let mut msg = msgs::UpdateAddHTLC { channel_id: chan.2, @@ -6276,6 +6263,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { payment_hash: our_payment_hash, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet.clone(), + 
skimmed_fee_msat: None, }; for i in 0..50 { @@ -6361,10 +6349,14 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { //Disconnect and Reconnect nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); @@ -6793,7 +6785,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let chan =create_announced_chan_between_nodes(&nodes, 0, 1); let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis; // We route 2 dust-HTLCs between A and B let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); @@ -6886,7 +6878,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis; let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -6927,7 +6919,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); - connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..) 
.filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect(); check_spends!(timeout_tx[0], bs_commitment_tx[0]); @@ -6938,7 +6930,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { if !revoked { assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); } else { - assert_eq!(timeout_tx[0].lock_time.0, 12); + assert_eq!(timeout_tx[0].lock_time.0, 11); } // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx mine_transaction(&nodes[0], &timeout_tx[0]); @@ -6969,8 +6961,8 @@ fn test_user_configurable_csv_delay() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound() - if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new() + if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0, &low_our_to_self_config, 0, 42) { @@ -6980,11 +6972,11 @@ fn test_user_configurable_csv_delay() { } } else { assert!(false) } - // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req() + // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger, 42) { @@ -7012,11 +7004,11 @@ fn test_user_configurable_csv_delay() { } else { panic!(); } check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }); - // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req() + // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), + if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger, 42) { @@ -7043,8 
+7035,8 @@ fn test_check_htlc_underpaying() { let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_features(nodes[1].node.invoice_features()); - let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap(); + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap(); let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap(); nodes[0].node.send_payment_with_route(&route, our_payment_hash, @@ -7127,10 +7119,14 @@ fn test_announce_disable_channels() { } } // Reconnect peers - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 3); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 3); @@ -7189,8 +7185,8 @@ fn test_bump_penalty_txn_on_revoked_commitment() { let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30) - .with_features(nodes[0].node.invoice_features()); - let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000, 30); + .with_bolt11_features(nodes[0].node.invoice_features()).unwrap(); + let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000); send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000); let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -7212,8 +7208,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() { // Actually revoke tx by claiming a HTLC claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); - let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] }); + connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()])); check_added_monitors!(nodes[1], 1); // One or more justice tx should have been broadcast, check it @@ -7294,15 +7289,15 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { let chan = 
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps) - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_features(nodes[1].node.invoice_features()); + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.invoice_features()).unwrap(); let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, - 3_000_000, 50, nodes[0].logger, &scorer, &random_seed_bytes).unwrap(); + 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap(); let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0; - let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_features(nodes[0].node.invoice_features()); + let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap(); let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None, - 3_000_000, 50, nodes[0].logger, &scorer, &random_seed_bytes).unwrap(); + 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap(); send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000); let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2); @@ -7312,13 +7307,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { // Revoke local commitment tx claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx - connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }); + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()])); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); - connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) + connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) let revoked_htlc_txn = { let txn = nodes[1].tx_broadcaster.unique_txn_broadcast(); @@ -7338,10 +7332,10 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { // Broadcast set of revoked txn on A let hash_128 = connect_blocks(&nodes[0], 40); - let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] }); - let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }); + let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]); + connect_block(&nodes[0], &block_11); + 
let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]); + connect_block(&nodes[0], &block_129); let events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); match events.last().unwrap() { @@ -7392,10 +7386,10 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { } // Connect one more block to see if bumped penalty are issued for HTLC txn - let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }); - let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() }); + let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn); + connect_block(&nodes[0], &block_130); + let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new()); + connect_block(&nodes[0], &block_131); // Few more blocks to confirm penalty txn connect_blocks(&nodes[0], 4); @@ -7417,8 +7411,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { txn }; // Broadcast claim txn and confirm blocks to avoid further bumps on this outputs - let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn }); + connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn)); connect_blocks(&nodes[0], 20); { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -7464,7 +7457,7 @@ fn test_bump_penalty_txn_on_remote_commitment() { expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); mine_transaction(&nodes[1], &remote_txn[0]); check_added_monitors!(nodes[1], 2); - connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires + connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires // One or more claim tx should have been broadcast, check it let timeout; @@ -7510,7 +7503,7 @@ fn test_bump_penalty_txn_on_remote_commitment() { assert_ne!(feerate_preimage, 0); // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it - connect_blocks(&nodes[1], 15); + connect_blocks(&nodes[1], 1); { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -7630,8 +7623,7 @@ fn test_bump_txn_sanitize_tracking_maps() { node_txn.clear(); penalty_txn }; - let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }); + connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn)); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap(); @@ -7640,45 +7632,6 @@ fn test_bump_txn_sanitize_tracking_maps() { } } -#[test] -fn test_pending_claimed_htlc_no_balance_underflow() { - // Tests that if we have a pending outbound HTLC as well as a claimed-but-not-fully-removed - // HTLC we will 
not underflow when we call `Channel::get_balance_msat()`. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); - - let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_010_000); - nodes[1].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[1], payment_hash, 1_010_000); - check_added_monitors!(nodes[1], 1); - let fulfill_ev = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &fulfill_ev.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &fulfill_ev.commitment_signed); - check_added_monitors!(nodes[0], 1); - let (_raa, _cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - - // At this point nodes[1] has received 1,010k msat (10k msat more than their reserve) and can - // send an HTLC back (though it will go in the holding cell). Send an HTLC back and check we - // can get our balance. - - // Get a route from nodes[1] to nodes[0] by getting a route going the other way and then flip - // the public key of the only hop. This works around ChannelDetails not showing the - // almost-claimed HTLC as available balance. - let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); - route.payment_params = None; // This is all wrong, but unnecessary - route.paths[0][0].pubkey = nodes[0].node.get_our_node_id(); - let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]); - nodes[1].node.send_payment_with_route(&route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - - assert_eq!(nodes[1].node.list_channels()[0].balance_msat, 1_000_000); -} - #[test] fn test_channel_conf_timeout() { // Tests that, for inbound channels, we give up on them if the funding transaction does not @@ -7926,7 +7879,7 @@ fn test_reject_funding_before_inbound_channel_accepted() { let accept_chan_msg = { let mut node_1_per_peer_lock; let mut node_1_peer_state_lock; - let channel = get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id); + let channel = get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id); channel.get_accept_channel_message() }; nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg); @@ -8043,19 +7996,19 @@ fn test_onion_value_mpp_set_calculation() { let sample_path = route.paths.pop().unwrap(); let mut path_1 = sample_path.clone(); - path_1[0].pubkey = nodes[1].node.get_our_node_id(); - path_1[0].short_channel_id = chan_1_id; - path_1[1].pubkey = nodes[3].node.get_our_node_id(); - path_1[1].short_channel_id = chan_3_id; - path_1[1].fee_msat = 100_000; + path_1.hops[0].pubkey = nodes[1].node.get_our_node_id(); + path_1.hops[0].short_channel_id = chan_1_id; + path_1.hops[1].pubkey = nodes[3].node.get_our_node_id(); + path_1.hops[1].short_channel_id = chan_3_id; + path_1.hops[1].fee_msat = 100_000; route.paths.push(path_1); let mut path_2 = sample_path.clone(); - path_2[0].pubkey = nodes[2].node.get_our_node_id(); - path_2[0].short_channel_id = 
chan_2_id; - path_2[1].pubkey = nodes[3].node.get_our_node_id(); - path_2[1].short_channel_id = chan_4_id; - path_2[1].fee_msat = 1_000; + path_2.hops[0].pubkey = nodes[2].node.get_our_node_id(); + path_2.hops[0].short_channel_id = chan_2_id; + path_2.hops[1].pubkey = nodes[3].node.get_our_node_id(); + path_2.hops[1].short_channel_id = chan_4_id; + path_2.hops[1].fee_msat = 1_000; route.paths.push(path_2); // Send payment @@ -8087,7 +8040,7 @@ fn test_onion_value_mpp_set_calculation() { // Edit amt_to_forward to simulate the sender having set // the final amount and the routing node taking less fee onion_payloads[1].amt_to_forward = 99_000; - let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash); + let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); payment_event.msgs[0].onion_routing_packet = new_onion_packet; } @@ -8152,11 +8105,11 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) { for i in 0..routing_node_count { let routing_node = 2 + i; let mut path = sample_path.clone(); - path[0].pubkey = nodes[routing_node].node.get_our_node_id(); - path[0].short_channel_id = src_chan_ids[i]; - path[1].pubkey = nodes[dst_idx].node.get_our_node_id(); - path[1].short_channel_id = dst_chan_ids[i]; - path[1].fee_msat = msat_amounts[i]; + path.hops[0].pubkey = nodes[routing_node].node.get_our_node_id(); + path.hops[0].short_channel_id = src_chan_ids[i]; + path.hops[1].pubkey = nodes[dst_idx].node.get_our_node_id(); + path.hops[1].short_channel_id = dst_chan_ids[i]; + path.hops[1].fee_msat = msat_amounts[i]; route.paths.push(path); } @@ -8205,12 +8158,12 @@ fn test_simple_mpp() { let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); - route.paths[0][0].short_channel_id = chan_1_id; - route.paths[0][1].short_channel_id = chan_3_id; - route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); - route.paths[1][0].short_channel_id = chan_2_id; - route.paths[1][1].short_channel_id = chan_4_id; + route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].short_channel_id = chan_1_id; + route.paths[0].hops[1].short_channel_id = chan_3_id; + route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].short_channel_id = chan_2_id; + route.paths[1].hops[1].short_channel_id = chan_4_id; send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage); } @@ -8275,14 +8228,7 @@ fn test_secret_timeout() { } else { panic!(); } let mut block = { let node_1_blocks = nodes[1].blocks.lock().unwrap(); - Block { - header: BlockHeader { - version: 0x2000000, - prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(), - merkle_root: TxMerkleNode::all_zeros(), - time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 }, - txdata: vec![], - } + create_dummy_block(node_1_blocks.last().unwrap().0.block_hash(), node_1_blocks.len() as u32 + 7200, Vec::new()) }; connect_block(&nodes[1], &block); if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) { @@ -8430,11 
+8376,10 @@ fn test_update_err_monitor_lockdown() { assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed); watchtower }; - let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - let block = Block { header, txdata: vec![] }; + let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating // transaction lock time requirements here. - chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 0)); + chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200)); watchtower.chain_monitor.block_connected(&block, 200); // Try to update ChannelMonitor @@ -8449,7 +8394,7 @@ fn test_update_err_monitor_lockdown() { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2); - if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { + if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure); assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } @@ -8486,6 +8431,9 @@ fn test_concurrent_monitor_claim() { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice")); let persister = test_utils::TestPersister::new(); + let alice_broadcaster = test_utils::TestBroadcaster::with_blocks( + Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())), + ); let watchtower_alice = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); @@ -8494,20 +8442,20 @@ fn test_concurrent_monitor_claim() { assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); + let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed); watchtower }; - let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - let block = Block { header, txdata: vec![] }; - // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating - // transaction lock time requirements here. - chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (block.clone(), 0)); - watchtower_alice.chain_monitor.block_connected(&block, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); + let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); + // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time + // requirements here. 
+ const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; + alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST)); + watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST); // Watchtower Alice should have broadcast a commitment/HTLC-timeout let alice_state = { - let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcast(); + let mut txn = alice_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 2); txn.remove(0) }; @@ -8516,6 +8464,7 @@ fn test_concurrent_monitor_claim() { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob")); let persister = test_utils::TestPersister::new(); + let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); let watchtower_bob = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); @@ -8524,12 +8473,11 @@ fn test_concurrent_monitor_claim() { assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); + let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed); watchtower }; - let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); + watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1); // Route another payment to generate another update with still previous HTLC pending let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000); @@ -8544,7 +8492,7 @@ fn test_concurrent_monitor_claim() { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2); - if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { + if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure); assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); @@ -8555,22 +8503,25 @@ fn test_concurrent_monitor_claim() { check_added_monitors!(nodes[0], 1); //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout - let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); + 
watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST); // Watchtower Bob should have broadcast a commitment/HTLC-timeout let bob_state_y; { - let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcast(); + let mut txn = bob_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 2); bob_state_y = txn.remove(0); }; // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout - let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); + let height = HTLC_TIMEOUT_BROADCAST + 1; + connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); + check_closed_broadcast(&nodes[0], 1, true); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false); + watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height); + check_added_monitors(&nodes[0], 1); { - let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcast(); + let htlc_txn = alice_broadcaster.txn_broadcast(); assert_eq!(htlc_txn.len(), 2); check_spends!(htlc_txn[0], bob_state_y); // Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for @@ -8642,15 +8593,15 @@ fn test_htlc_no_detection() { check_spends!(local_txn[0], chan_1.3); // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }); + let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]); + connect_block(&nodes[0], &block); // We deliberately connect the local tx twice as this should provoke a failure calling // this test before #653 fix. 
- chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1); + chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); + connect_blocks(&nodes[0], TEST_FINAL_CLTV); let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -8661,8 +8612,7 @@ fn test_htlc_no_detection() { node_txn[0].clone() }; - let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] }); + connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()])); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], our_payment_hash, false); } @@ -8722,8 +8672,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain true => alice_txn.clone(), false => get_local_commitment_txn!(nodes[1], chan_ab.2) }; - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; - connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}); + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()])); if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); @@ -8802,8 +8751,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain let mut txn_to_broadcast = alice_txn.clone(); if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); } if !go_onchain_before_fulfill { - let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42}; - connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}); + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()])); // If Bob was the one to force-close, he will have already passed these checks earlier. if broadcast_alice { check_closed_broadcast!(nodes[1], true); @@ -8991,16 +8939,16 @@ fn test_duplicate_chan_id() { nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event - let funding_created = { + let (_, funding_created) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). 
- let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); + let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap(); let logger = test_utils::TestLogger::new(); - as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap() + as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap() }; check_added_monitors!(nodes[0], 0); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created); @@ -9248,7 +9196,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // We should broadcast an HTLC transaction spending our funding transaction first let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(spending_txn.len(), 2); - assert_eq!(spending_txn[0], node_txn[0]); + assert_eq!(spending_txn[0].txid(), node_txn[0].txid()); check_spends!(spending_txn[1], node_txn[0]); // We should also generate a SpendableOutputs event with the to_self output (as its // timelock is up). @@ -9290,8 +9238,8 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_features(nodes[1].node.invoice_features()); - let route = get_route!(nodes[0], payment_params, 10_000, TEST_FINAL_CLTV).unwrap(); + .with_bolt11_features(nodes[1].node.invoice_features()).unwrap(); + let route = get_route!(nodes[0], payment_params, 10_000).unwrap(); let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]); @@ -9399,12 +9347,12 @@ fn test_inconsistent_mpp_params() { let chan_2_3 =create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_features(nodes[3].node.invoice_features()); - let mut route = get_route!(nodes[0], payment_params, 15_000_000, TEST_FINAL_CLTV).unwrap(); + .with_bolt11_features(nodes[3].node.invoice_features()).unwrap(); + let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap(); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a[0].pubkey == nodes[1].node.get_our_node_id() { + if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); @@ -9505,12 +9453,12 @@ fn test_keysend_payments_to_public_node() { let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false), final_value_msat: 10000, }; let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); - let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap(); + let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap(); let test_preimage = PaymentPreimage([42; 32]); let payment_hash = 
nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage), @@ -9536,7 +9484,7 @@ fn test_keysend_payments_to_private_node() { let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false), final_value_msat: 10000, }; let network_graph = nodes[0].network_graph.clone(); @@ -9545,7 +9493,7 @@ fn test_keysend_payments_to_private_node() { let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route = find_route( &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::>()), - nodes[0].logger, &scorer, &random_seed_bytes + nodes[0].logger, &scorer, &(), &random_seed_bytes ).unwrap(); let test_preimage = PaymentPreimage([42; 32]); @@ -9580,7 +9528,7 @@ fn test_double_partial_claim() { assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a[0].pubkey == nodes[1].node.get_our_node_id() { + if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); @@ -9666,8 +9614,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if on_holder_tx { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); - chan.holder_dust_limit_satoshis = 546; + let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); + chan.context.holder_dust_limit_satoshis = 546; } nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); @@ -9683,11 +9631,15 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + // Fetch a route in advance as we will be unable to once we're unable to send. 
+ let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000); + let dust_buffer_feerate = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); let chan = chan_lock.channel_by_id.get(&channel_id).unwrap(); - chan.get_dust_buffer_feerate(None) as u64 + chan.context.get_dust_buffer_feerate(None) as u64 }; let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; @@ -9695,7 +9647,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; - let dust_htlc_on_counterparty_tx: u64 = 25; + let dust_htlc_on_counterparty_tx: u64 = 4; let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx; if on_holder_tx { @@ -9720,7 +9672,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if dust_outbound_balance { // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) // Outbound dust balance: 5000 sats - for _ in 0..dust_htlc_on_counterparty_tx { + for _ in 0..dust_htlc_on_counterparty_tx - 1 { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat); nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); @@ -9728,32 +9680,27 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e } else { // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) // Inbound dust balance: 5000 sats - for _ in 0..dust_htlc_on_counterparty_tx { + for _ in 0..dust_htlc_on_counterparty_tx - 1 { route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat); } } } - let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1); if exposure_breach_event == ExposureEvent::AtHTLCForward { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); - let mut config = UserConfig::default(); + route.paths[0].hops.last_mut().unwrap().fee_msat = + if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; // With default dust exposure: 5000 sats if on_holder_tx { - let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1); - let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, 
APIError::ChannelUnavailable { ref err }, - assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat))); + ), true, APIError::ChannelUnavailable { .. }, {}); } else { unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat))); + ), true, APIError::ChannelUnavailable { .. }, {}); } } else if exposure_breach_event == ExposureEvent::AtHTLCReception { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }); nodes[1].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[1], 1); @@ -9769,10 +9716,13 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1); } else { // Outbound dust balance: 5200 sats - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat), 1); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), + format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 1, + config.channel_config.max_dust_htlc_exposure_msat), 1); } } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000); + route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000; nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); { @@ -9826,8 +9776,8 @@ fn test_non_final_funding_tx() { assert_eq!(events.len(), 1); let mut tx = match events[0] { Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => { - // Timelock the transaction _beyond_ the best client height + 2. - Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 3), input: vec![input], output: vec![TxOut { + // Timelock the transaction _beyond_ the best client height + 1. 
+			Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 2), input: vec![input], output: vec![TxOut {
 				value: *channel_value_satoshis, script_pubkey: output_script.clone(),
 			}]}
 		},
 		_ => panic!()
 	}
-	// However, transaction should be accepted if it's in a +2 headroom from best block.
+	// However, transaction should be accepted if it's in a +1 headroom from best block.
 	tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
 	assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
 	get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
@@ -9946,7 +9896,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash
 		let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap();
 		(payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret)
 	};
-	let route = get_route!(nodes[0], payment_parameters, recv_value, final_cltv_expiry_delta as u32).unwrap();
+	let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
 	nodes[0].node.send_payment_with_route(&route, payment_hash,
 		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
@@ -9982,3 +9932,132 @@ fn test_payment_with_custom_min_cltv_expiry_delta() {
 	do_payment_with_custom_min_final_cltv_expiry(true, false);
 	do_payment_with_custom_min_final_cltv_expiry(true, true);
 }
+
+#[test]
+fn test_disconnects_peer_awaiting_response_ticks() {
+	// Tests that nodes which are awaiting on a response critical for channel responsiveness
+	// disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+	let mut chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Asserts a disconnect event is queued to the user.
+	let check_disconnect_event = |node: &Node, should_disconnect: bool| {
+		let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
+			if let MessageSendEvent::HandleError { action, .. } = event {
+				if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
+					Some(())
+				} else {
+					None
+				}
+			} else {
+				None
+			}
+		);
+		assert_eq!(disconnect_event.is_some(), should_disconnect);
+	};
+
+	// Fires timer ticks ensuring we only attempt to disconnect peers after reaching
+	// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+	let check_disconnect = |node: &Node| {
+		// No disconnect without any timer ticks.
+		check_disconnect_event(node, false);
+
+		// No disconnect with 1 timer tick less than required.
+		for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
+			node.node.timer_tick_occurred();
+			check_disconnect_event(node, false);
+		}
+
+		// Disconnect after reaching the required ticks.
+		node.node.timer_tick_occurred();
+		check_disconnect_event(node, true);
+
+		// Disconnect again on the next tick if the peer hasn't been disconnected yet.
+		node.node.timer_tick_occurred();
+		check_disconnect_event(node, true);
+	};
+
+	create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+	// We'll start by performing a fee update with Alice (nodes[0]) on the channel.
+	*nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+	nodes[0].node.timer_tick_occurred();
+	check_added_monitors!(&nodes[0], 1);
+	let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
+	check_added_monitors!(&nodes[1], 1);
+
+	// This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
+	let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
+	check_added_monitors!(&nodes[0], 1);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
+	check_added_monitors(&nodes[0], 1);
+
+	// Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
+	// pretend Bob hasn't received the message and check whether he'll disconnect Alice after
+	// reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+	let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	check_disconnect(&nodes[1]);
+
+	// Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
+	//
+	// Note that since the commitment dance didn't complete above, Alice is expected to resend her
+	// final `RevokeAndACK` to Bob to complete it.
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	let bob_init = msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	};
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
+	let alice_init = msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	};
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
+
+	// Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
+	// received Bob's yet, so she should disconnect him after reaching
+	// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+	let alice_channel_reestablish = get_event_msg!(
+		nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
+	);
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
+	check_disconnect(&nodes[0]);
+
+	// Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
+	let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
+		if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			Some(msg.clone())
+		} else {
+			None
+		}
+	).unwrap();
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
+
+	// Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
+	for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+		nodes[0].node.timer_tick_occurred();
+		check_disconnect_event(&nodes[0], false);
+	}
+
+	// However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
+	// reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+	check_disconnect(&nodes[1]);
+
+	// Finally, have Bob process the last message.
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
+	check_added_monitors(&nodes[1], 1);
+
+	// At this point, neither node should attempt to disconnect each other, since they aren't
+	// waiting on any messages.
+	for node in &nodes {
+		for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+			node.node.timer_tick_occurred();
+			check_disconnect_event(node, false);
+		}
+	}
+}
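
The new test above only observes disconnects because it drives `ChannelManager::timer_tick_occurred` by hand; in a running node something has to fire those ticks for `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS` to ever elapse. Below is a minimal sketch of such a driver, assuming an application-owned `channel_manager` handle and a roughly one-minute cadence; the `spawn_timer_ticks` helper name and the interval are illustrative only, and real deployments would more likely let the `lightning-background-processor` crate handle this than hand-roll the loop.

use std::thread;
use std::time::Duration;

/// Illustrative only: periodically invoke a tick callback (for example a closure
/// that calls `channel_manager.timer_tick_occurred()`) from a background thread.
fn spawn_timer_ticks(interval: Duration, tick: impl Fn() + Send + 'static) -> thread::JoinHandle<()> {
	thread::spawn(move || loop {
		thread::sleep(interval);
		// Each call advances the awaiting-response counter exercised by the test, so a
		// peer that owes a response is disconnected after roughly
		// interval * DISCONNECT_PEER_AWAITING_RESPONSE_TICKS of wall-clock time.
		tick();
	})
}

// Hypothetical usage, with `channel_manager` being the application's Arc<ChannelManager<...>>:
// let _ticker = spawn_timer_ticks(Duration::from_secs(60),
// 	move || channel_manager.timer_tick_occurred());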