diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index c0254e35..7cd0f376 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -17,9 +17,9 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
-use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
+use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
@@ -42,19 +42,17 @@ use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::blockdata::script::{Builder, ScriptBuf};
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::ChainHash;
-use bitcoin::network::constants::Network;
-use bitcoin::{Sequence, Transaction, TxIn, TxOut, Witness};
+use bitcoin::network::Network;
+use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness};
 use bitcoin::OutPoint as BitcoinOutPoint;
+use bitcoin::transaction::Version;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::{PublicKey,SecretKey};
 
-use regex;
-
 use crate::io;
 use crate::prelude::*;
 use alloc::collections::BTreeSet;
-use core::default::Default;
 use core::iter::repeat;
 use bitcoin::hashes::Hash;
 use crate::sync::{Arc, Mutex, RwLock};
@@ -64,6 +62,38 @@ use crate::ln::chan_utils::CommitmentTransaction;
 
 use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
 
+#[test]
+fn test_channel_resumption_fail_post_funding() {
+	// If we fail to exchange funding with a peer prior to it disconnecting, we'll resume the
+	// channel open on reconnect; however, if we do exchange funding, we do not currently support
+	// replaying it, so here we test that the channel closes.
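(As context for the behaviour under test: once the funding messages have been exchanged, LDK does not replay them on reconnect, so the channel surfaces a close with ClosureReason::DisconnectedPeer. A minimal sketch of how an application consuming this API might observe that close; the handle_closure function name is illustrative and not part of this patch:)

    // Hypothetical observer sketch: react to the close this test expects.
    fn handle_closure(event: Event) {
        match event {
            Event::ChannelClosed { channel_id, reason: ClosureReason::DisconnectedPeer, .. } => {
                // Funding was exchanged but never confirmed; the channel will not
                // be resumed on reconnect, so any local funding tx can be discarded.
                println!("channel {} closed: peer disconnected pre-confirmation", channel_id);
            },
            _ => {},
        }
    }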
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap(); + let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan); + let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan); + + let (temp_chan_id, tx, funding_output) = + create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output); + nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx).unwrap(); + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]); + + // After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that + // explicitly here. + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); +} + #[test] fn test_insane_channel_opens() { // Stand up a network of 2 nodes @@ -107,22 +137,22 @@ fn test_insane_channel_opens() { use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT; // Test all mutations that would make the channel open message insane - insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg }); - insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg }); + insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg }); + insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. 
It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg }); - insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg }); + insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg }); - insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg }); + insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg }); - insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg }); + insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1 ; msg }); - insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); + insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); - insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg }); + insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg }); - insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg }); + insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg }); - insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg }); + insane_open_helper("max_accepted_htlcs was 484. 
It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg }); } #[test] @@ -166,7 +196,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); if !send_from_initiator { open_channel_message.channel_reserve_satoshis = 0; - open_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; } nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message); @@ -174,7 +204,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); if send_from_initiator { accept_channel_message.channel_reserve_satoshis = 0; - accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000; + accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; } nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message); { @@ -190,7 +220,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { chan_context.holder_selected_channel_reserve_satoshis = 0; chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000; }, - ChannelPhase::Funded(_) => assert!(false), + _ => assert!(false), } } @@ -682,7 +712,7 @@ fn test_update_fee_that_funder_cannot_afford() { //We made sure neither party's funds are below the dust limit and there are no HTLCs here assert_eq!(commitment_tx.output.len(), 2); let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000; - let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value); + let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); actual_fee = channel_value - actual_fee; assert_eq!(total_fee, actual_fee); } @@ -871,8 +901,8 @@ fn test_update_fee_with_fundee_update_add_htlc() { send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -985,8 +1015,8 @@ fn test_update_fee() { assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -1104,17 +1134,17 @@ fn 
fake_network_test() { // Close down the channels... close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); - check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); - check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -1296,9 +1326,9 @@ fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound let mut has_both_htlcs = 0; // check htlcs match ones committed for outp in remote_txn[0].output.iter() { - if outp.value == 800_000 / 1000 { + if outp.value.to_sat() == 800_000 / 1000 { has_both_htlcs += 1; - } else if outp.value == 900_000 / 1000 { + } else if outp.value.to_sat() == 900_000 / 1000 { has_both_htlcs += 1; } } @@ -1327,12 +1357,12 @@ fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx - assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value, 800); + assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), 800); assert_eq!(timeout_tx.input.len(), 1); assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx check_spends!(timeout_tx, remote_txn[0]); - assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value, 900); + 
assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), 900); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 3); @@ -1401,11 +1431,12 @@ fn test_fee_spike_violation_fails_htlc() { let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); - let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1; + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap(); + 3460001, &recipient_onion_fields, cur_height, &None).unwrap(); let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, @@ -1599,10 +1630,11 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1; + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap(); + 700_000, &recipient_onion_fields, cur_height, &None).unwrap(); let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, @@ -1778,10 +1810,11 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1; + let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap(); + let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap(); + &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None).unwrap(); let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, @@ -2042,11 +2075,11 @@ fn test_channel_reserve_holding_cell_htlcs() { assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); assert_eq!(via_channel_id, Some(chan_2.2)); match &purpose { - PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. 
} => { + PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); assert_eq!(our_payment_secret_21, *payment_secret); }, - _ => panic!("expected PaymentPurpose::InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") } }, _ => panic!("Unexpected event"), @@ -2058,11 +2091,11 @@ fn test_channel_reserve_holding_cell_htlcs() { assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); assert_eq!(via_channel_id, Some(chan_2.2)); match &purpose { - PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); assert_eq!(our_payment_secret_22, *payment_secret); }, - _ => panic!("expected PaymentPurpose::InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") } }, _ => panic!("Unexpected event"), @@ -2270,7 +2303,8 @@ fn channel_monitor_network_test() { send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); // Simple case with no pending HTLCs: - nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap(); + let error_message = "Channel force-closed"; + nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); @@ -2296,7 +2330,8 @@ fn channel_monitor_network_test() { // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not // broadcasted until we reach the timelock time). 
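(One mechanical change recurs throughout this file, starting with the force-close just below: force_close_broadcasting_latest_txn now takes a third argument, a human-readable reason that is sent to the peer in the closing error message, which is why each call site gains a `let error_message = ...;` binding. The updated call shape, with illustrative chan_id and peer_id bindings standing in for whichever channel a given call site closes:)

    // Sketch of the updated force-close API; the reason string travels to the
    // counterparty in the error frame that accompanies the close.
    let error_message = "Channel force-closed";
    node.force_close_broadcasting_latest_txn(&chan_id, &peer_id, error_message.to_string()).unwrap();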
- nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap(); + let error_message = "Channel force-closed"; + nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); { @@ -2336,7 +2371,8 @@ fn channel_monitor_network_test() { // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) - nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap(); + let error_message = "Channel force-closed"; + nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap(); check_added_monitors!(nodes[2], 1); check_closed_broadcast!(nodes[2], true); let node2_commitment_txid; @@ -2417,7 +2453,7 @@ fn channel_monitor_network_test() { } check_added_monitors!(nodes[4], 1); test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS); - check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000); mine_transaction(&nodes[4], &node_txn[0]); check_preimage_claim(&nodes[4], &node_txn); @@ -2430,17 +2466,17 @@ fn channel_monitor_network_test() { assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon), Ok(ChannelMonitorUpdateStatus::Completed)); - check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000); } #[test] fn test_justice_tx_htlc_timeout() { // Test justice txn built on revoked HTLC-Timeout tx, against both sides - let mut alice_config = UserConfig::default(); + let mut alice_config = test_default_channel_config(); alice_config.channel_handshake_config.announced_channel = true; alice_config.channel_handshake_limits.force_announced_channel_preference = false; alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = UserConfig::default(); + let mut bob_config = test_default_channel_config(); bob_config.channel_handshake_config.announced_channel = true; bob_config.channel_handshake_limits.force_announced_channel_preference = false; bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; @@ -2499,11 +2535,11 @@ fn test_justice_tx_htlc_timeout() { #[test] fn test_justice_tx_htlc_success() { // Test justice txn built on revoked HTLC-Success tx, against both sides - let mut alice_config = UserConfig::default(); + let mut alice_config = test_default_channel_config(); alice_config.channel_handshake_config.announced_channel = true; alice_config.channel_handshake_limits.force_announced_channel_preference = false; alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = UserConfig::default(); + let mut bob_config = test_default_channel_config(); bob_config.channel_handshake_config.announced_channel = true; bob_config.channel_handshake_limits.force_announced_channel_preference = false; bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; @@ -2646,8 +2682,8 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: } 
}); // On the first commitment, node[1]'s balance was below dust so it didn't have an output - let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value }; - let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value; + let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value.to_sat() }; + let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value.to_sat(); assert_eq!(total_claimable_balance, expected_claimable_balance); } @@ -2750,7 +2786,7 @@ fn claim_htlc_outputs_single_tx() { check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let mut events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); + expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]); match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} _ => panic!("Unexpected event"), @@ -2867,8 +2903,8 @@ fn test_htlc_on_chain_success() { check_spends!(node_txn[1], commitment_tx[0]); assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output - assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output + assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output assert_eq!(node_txn[0].lock_time, LockTime::ZERO); assert_eq!(node_txn[1].lock_time, LockTime::ZERO); @@ -2889,8 +2925,10 @@ fn test_htlc_on_chain_success() { } let chan_id = Some(chan_1.2); match forwarded_events[1] { - Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => { - assert_eq!(fee_earned_msat, Some(1000)); + Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, + next_channel_id, outbound_amount_forwarded_msat, .. + } => { + assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); assert_eq!(next_channel_id, Some(chan_2.2)); @@ -2899,8 +2937,10 @@ fn test_htlc_on_chain_success() { _ => panic!() } match forwarded_events[2] { - Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => { - assert_eq!(fee_earned_msat, Some(1000)); + Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, + next_channel_id, outbound_amount_forwarded_msat, .. 
+ } => { + assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); assert_eq!(next_channel_id, Some(chan_2.2)); @@ -2956,13 +2996,13 @@ fn test_htlc_on_chain_success() { if $htlc_offered { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output - assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output + assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output } else { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment - assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment + assert!(node_txn[1].output[0].script_pubkey.is_p2wpkh()); // direct payment } node_txn.clear(); } } @@ -3004,7 +3044,7 @@ fn test_htlc_on_chain_success() { assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1); - assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + assert!(commitment_spend.output[0].script_pubkey.is_p2wpkh()); // direct payment // We don't bother to check that B can claim the HTLC output on its commitment tx here as // we already checked the same situation with A. @@ -3308,18 +3348,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PendingHTLCsForwardable { .. } => { }, - _ => panic!("Unexpected event"), - }; - match events[1] { Event::HTLCHandlingFailed { .. } => { }, _ => panic!("Unexpected event"), } + match events[1] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; // Deliberately don't process the pending fail-back so they all fail back at once after // block connection just like the !deliver_bs_raa case } - let mut failed_htlcs = HashSet::new(); + let mut failed_htlcs = new_hash_set(); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); mine_transaction(&nodes[1], &revoked_local_txn[0]); @@ -3499,9 +3539,10 @@ fn fail_backward_pending_htlc_upon_channel_failure() { let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let current_height = nodes[1].node.best_block.read().unwrap().height() + 1; + let current_height = nodes[1].node.best_block.read().unwrap().height + 1; + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads( - &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap(); + &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap(); let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); @@ -3558,9 +3599,9 @@ fn test_htlc_ignore_latest_remote_commitment() { return; } let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3; - + let error_message = "Channel force-closed"; route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -3623,8 +3664,8 @@ fn test_force_close_fail_back() { // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). - - nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + let error_message = "Channel force-closed"; + nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); @@ -3695,7 +3736,7 @@ fn test_dup_events_on_peer_disconnect() { #[test] fn test_peer_disconnected_before_funding_broadcasted() { // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects - // before the funding transaction has been broadcasted. + // before the funding transaction has been broadcasted, and doesn't reconnect back within time. 
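(The "within time" referenced in this comment is measured in background timer ticks: once UNFUNDED_CHANNEL_AGE_LIMIT_TICKS calls to timer_tick_occurred pass without the peer reconnecting, an unfunded channel is aged out and closed. The loop added further down in this hunk drives that clock explicitly, in this shape:)

    // One call per background timer tick; after the unfunded-channel age limit
    // elapses, the channel is closed rather than resumed on reconnect.
    for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
        nodes[0].node.timer_tick_occurred();
    }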
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -3724,11 +3765,18 @@ fn test_peer_disconnected_before_funding_broadcasted() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); } - // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are - // disconnected before the funding transaction was broadcasted. + // The peers disconnect before the funding is broadcasted. nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + // The time for peers to reconnect expires. + for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS { + nodes[0].node.timer_tick_occurred(); + } + + // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a + // `DiscardFunding` event when the peers are disconnected and do not reconnect before the + // funding transaction is broadcasted. check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true , [nodes[1].node.get_our_node_id()], 1000000); check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false @@ -3768,7 +3816,10 @@ fn test_simple_peer_disconnect() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3); + claim_payment_along_route( + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3) + .skip_last(true) + ); fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); @@ -3946,11 +3997,11 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); assert_eq!(via_channel_id, Some(channel_id)); match &purpose { - PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_1, *payment_secret); }, - _ => panic!("expected PaymentPurpose::InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") } }, _ => panic!("Unexpected event"), @@ -4311,11 +4362,11 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => { assert_eq!(payment_hash_2, *payment_hash); match &purpose { - PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. 
} => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_2, *payment_secret); }, - _ => panic!("expected PaymentPurpose::InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") } }, _ => panic!("Unexpected event"), @@ -4495,7 +4546,8 @@ fn test_claim_sizeable_push_msat() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap(); + let error_message = "Channel force-closed"; + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); @@ -4522,9 +4574,10 @@ fn test_claim_on_remote_sizeable_push_msat() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let error_message = "Channel force-closed"; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); @@ -4899,7 +4952,7 @@ fn test_onchain_to_onchain_claim() { assert_eq!(c_txn.len(), 1); check_spends!(c_txn[0], commitment_tx[0]); assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert!(c_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor @@ -4912,8 +4965,10 @@ fn test_onchain_to_onchain_claim() { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => { - assert_eq!(fee_earned_msat, Some(1000)); + Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, + next_channel_id, outbound_amount_forwarded_msat, .. 
+ } => { + assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, Some(chan_1.2)); assert_eq!(claim_from_onchain_tx, true); assert_eq!(next_channel_id, Some(chan_2.2)); @@ -4958,7 +5013,7 @@ fn test_onchain_to_onchain_claim() { assert_eq!(b_txn.len(), 1); check_spends!(b_txn[0], commitment_tx[0]); assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + assert!(b_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx check_closed_broadcast!(nodes[1], true); @@ -5045,9 +5100,9 @@ fn test_duplicate_payment_hash_one_failure_one_success() { // (with value 900 sats) will be claimed in the below `claim_funds` call. if node_txn.len() > 2 { assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - htlc_timeout_tx = if node_txn[2].output[0].value < 900 { node_txn[2].clone() } else { node_txn[0].clone() }; + htlc_timeout_tx = if node_txn[2].output[0].value.to_sat() < 900 { node_txn[2].clone() } else { node_txn[0].clone() }; } else { - htlc_timeout_tx = if node_txn[0].output[0].value < 900 { node_txn[1].clone() } else { node_txn[0].clone() }; + htlc_timeout_tx = if node_txn[0].output[0].value.to_sat() < 900 { node_txn[1].clone() } else { node_txn[0].clone() }; } } @@ -5338,7 +5393,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1); check_closed_broadcast!(nodes[2], true); if deliver_last_raa { - expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true); + expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); let expected_destinations: Vec = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); @@ -5396,11 +5451,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let as_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 }); - let mut as_failds = HashSet::new(); + let mut as_faileds = new_hash_set(); let mut as_updates = 0; for event in as_events.iter() { if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { - assert!(as_failds.insert(*payment_hash)); + assert!(as_faileds.insert(*payment_hash)); if *payment_hash != payment_hash_2 { assert_eq!(*payment_failed_permanently, deliver_last_raa); } else { @@ -5412,21 +5467,21 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } else if let &Event::PaymentFailed { .. 
} = event { } else { panic!("Unexpected event"); } } - assert!(as_failds.contains(&payment_hash_1)); - assert!(as_failds.contains(&payment_hash_2)); + assert!(as_faileds.contains(&payment_hash_1)); + assert!(as_faileds.contains(&payment_hash_2)); if announce_latest { - assert!(as_failds.contains(&payment_hash_3)); - assert!(as_failds.contains(&payment_hash_5)); + assert!(as_faileds.contains(&payment_hash_3)); + assert!(as_faileds.contains(&payment_hash_5)); } - assert!(as_failds.contains(&payment_hash_6)); + assert!(as_faileds.contains(&payment_hash_6)); let bs_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 }); - let mut bs_failds = HashSet::new(); + let mut bs_faileds = new_hash_set(); let mut bs_updates = 0; for event in bs_events.iter() { if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { - assert!(bs_failds.insert(*payment_hash)); + assert!(bs_faileds.insert(*payment_hash)); if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 { assert_eq!(*payment_failed_permanently, deliver_last_raa); } else { @@ -5438,12 +5493,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } else if let &Event::PaymentFailed { .. } = event { } else { panic!("Unexpected event"); } } - assert!(bs_failds.contains(&payment_hash_1)); - assert!(bs_failds.contains(&payment_hash_2)); + assert!(bs_faileds.contains(&payment_hash_1)); + assert!(bs_faileds.contains(&payment_hash_2)); if announce_latest { - assert!(bs_failds.contains(&payment_hash_4)); + assert!(bs_faileds.contains(&payment_hash_4)); } - assert!(bs_failds.contains(&payment_hash_5)); + assert!(bs_faileds.contains(&payment_hash_5)); // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should // get a NetworkUpdate. 
A should have gotten 4 HTLCs which were failed-back due to @@ -5534,7 +5589,7 @@ fn test_key_derivation_params() { let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer); - let message_router = test_utils::TestMessageRouter::new(network_graph.clone()); + let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager); let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) }; let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); node_cfgs.remove(0); @@ -5619,7 +5674,7 @@ fn test_static_output_closing_tx() { let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; mine_transaction(&nodes[0], &closing_tx); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -5627,7 +5682,7 @@ fn test_static_output_closing_tx() { check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -5669,7 +5724,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000); } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { @@ -5700,7 +5755,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ -5746,7 +5801,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, 
ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000); } else { expect_payment_failed!(nodes[0], our_payment_hash, true); } @@ -5828,26 +5883,26 @@ fn bolt2_open_channel_sending_node_checks_part2() { let push_msat=10001; assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis); + assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis); // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0 // Only the least-significant bit of channel_flags is currently defined resulting in channel_flags only having one of two possible states 0 or 1 - assert!(node0_to_1_send_open_channel.channel_flags<=1); + assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1); // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver. assert!(BREAKDOWN_TIMEOUT>0); - assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT); + assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT); // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within. let chain_hash = ChainHash::using_genesis_block(Network::Testnet); - assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash); + assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash); // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys. 
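(The assertions below are rewritten because the handshake fields shared between open_channel and open_channel2 now live in a nested common_fields struct, with payment_point renamed to payment_basepoint there. A brief access-pattern sketch under the new message layout:)

    // Shared V1/V2 channel-open fields moved under `common_fields`.
    let funding_pk = node0_to_1_send_open_channel.common_fields.funding_pubkey;
    let payment_bp = node0_to_1_send_open_channel.common_fields.payment_basepoint; // formerly `payment_point`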
- assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok()); + assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok()); + assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok()); + assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok()); + assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok()); + assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok()); } #[test] @@ -5861,7 +5916,7 @@ fn bolt2_open_channel_sane_dust_limit() { let push_msat=10001; nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap(); let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - node0_to_1_send_open_channel.dust_limit_satoshis = 547; + node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547; node0_to_1_send_open_channel.channel_reserve_satoshis = 100001; nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel); @@ -6169,7 +6224,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process. let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events(); assert_eq!(process_htlc_forwards_event.len(), 2); - match &process_htlc_forwards_event[0] { + match &process_htlc_forwards_event[1] { &Event::PendingHTLCsForwardable { .. 
} => {}, _ => panic!("Unexpected event"), } @@ -6475,10 +6530,11 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { get_route_and_payment_hash!(nodes[0], nodes[1], 1000); route.paths[0].hops[0].fee_msat = send_amt; let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1; + let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap(); + let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret); let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap(); + &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None).unwrap(); let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); let mut msg = msgs::UpdateAddHTLC { @@ -7201,7 +7257,7 @@ fn test_user_configurable_csv_delay() { // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); - open_channel.to_self_delay = 200; + open_channel.common_fields.to_self_delay = 200; if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) @@ -7216,7 +7272,7 @@ fn test_user_configurable_csv_delay() { nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - accept_channel.to_self_delay = 200; + accept_channel.common_fields.to_self_delay = 200; nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel); let reason_msg; if let MessageSendEvent::HandleError { ref action, .. 
} = nodes[0].node.get_and_clear_pending_msg_events()[0] { @@ -7233,7 +7289,7 @@ fn test_user_configurable_csv_delay() { // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); - open_channel.to_self_delay = 200; + open_channel.common_fields.to_self_delay = 200; if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) @@ -7337,7 +7393,7 @@ fn test_announce_disable_channels() { } let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - let mut chans_disabled = HashMap::new(); + let mut chans_disabled = new_hash_map(); for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { @@ -7430,8 +7486,8 @@ fn test_bump_penalty_txn_on_revoked_commitment() { let mut penalty_sum = 0; for outp in revoked_txn[0].output.iter() { - if outp.script_pubkey.is_v0_p2wsh() { - penalty_sum += outp.value; + if outp.script_pubkey.is_p2wsh() { + penalty_sum += outp.value.to_sat(); } } @@ -7452,7 +7508,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() { assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs assert_eq!(node_txn[0].output.len(), 1); check_spends!(node_txn[0], revoked_txn[0]); - let fee_1 = penalty_sum - node_txn[0].output[0].value; + let fee_1 = penalty_sum - node_txn[0].output[0].value.to_sat(); feerate_1 = fee_1 * 1000 / node_txn[0].weight().to_wu(); penalty_1 = node_txn[0].txid(); node_txn.clear(); @@ -7472,7 +7528,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() { penalty_2 = node_txn[0].txid(); // Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast assert_ne!(penalty_2, penalty_1); - let fee_2 = penalty_sum - node_txn[0].output[0].value; + let fee_2 = penalty_sum - node_txn[0].output[0].value.to_sat(); feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu(); // Verify 25% bump heuristic assert!(feerate_2 * 100 >= feerate_1 * 125); @@ -7495,7 +7551,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() { penalty_3 = node_txn[0].txid(); // Verify new bumped tx is different from last claiming transaction, we don't want spurrious rebroadcast assert_ne!(penalty_3, penalty_2); - let fee_3 = penalty_sum - node_txn[0].output[0].value; + let fee_3 = penalty_sum - node_txn[0].output[0].value.to_sat(); feerate_3 = fee_3 * 1000 / node_txn[0].weight().to_wu(); // Verify 25% bump heuristic assert!(feerate_3 * 100 >= feerate_2 * 125); @@ -7533,7 +7589,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000); let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); - send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000); + let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1; let revoked_local_txn 
	let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
	assert_eq!(revoked_local_txn[0].input.len(), 1);
@@ -7572,7 +7628,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
	let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
	connect_block(&nodes[0], &block_129);
	let events = nodes[0].node.get_and_clear_pending_events();
-	expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+	expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
	match events.last().unwrap() {
		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
		_ => panic!("Unexpected event"),
@@ -7714,7 +7770,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
	preimage = node_txn[0].txid();
	let index = node_txn[0].input[0].previous_output.vout;
-	let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
+	let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
	feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu();

	let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output {
@@ -7729,7 +7785,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
	timeout = timeout_tx.txid();
	let index = timeout_tx.input[0].previous_output.vout;
-	let fee = remote_txn[0].output[index as usize].value - timeout_tx.output[0].value;
+	let fee = remote_txn[0].output[index as usize].value.to_sat() - timeout_tx.output[0].value.to_sat();
	feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu();

	node_txn.clear();
@@ -7748,13 +7804,13 @@ fn test_bump_penalty_txn_on_remote_commitment() {
	check_spends!(preimage_bump, remote_txn[0]);
	let index = preimage_bump.input[0].previous_output.vout;
-	let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
+	let fee = remote_txn[0].output[index as usize].value.to_sat() - preimage_bump.output[0].value.to_sat();
	let new_feerate = fee * 1000 / preimage_bump.weight().to_wu();
	assert!(new_feerate * 100 > feerate_timeout * 125);
	assert_ne!(timeout, preimage_bump.txid());

	let index = node_txn[0].input[0].previous_output.vout;
-	let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
+	let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat();
	let new_feerate = fee * 1000 / node_txn[0].weight().to_wu();
	assert!(new_feerate * 100 > feerate_preimage * 125);
	assert_ne!(preimage, node_txn[0].txid());
@@ -7921,8 +7977,8 @@ fn test_override_channel_config() {
	// Assert the channel created by node0 is using the override config.
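	// Handshake parameters shared between the V1 and V2 channel-establishment messages now
	// live in `common_fields`, hence the field accesses below.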
	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-	assert_eq!(res.channel_flags, 0);
-	assert_eq!(res.to_self_delay, 200);
+	assert_eq!(res.common_fields.channel_flags, 0);
+	assert_eq!(res.common_fields.to_self_delay, 200);
}

#[test]
@@ -7936,11 +7992,11 @@ fn test_override_0msat_htlc_minimum() {
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-	assert_eq!(res.htlc_minimum_msat, 1);
+	assert_eq!(res.common_fields.htlc_minimum_msat, 1);

	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
	let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-	assert_eq!(res.htlc_minimum_msat, 1);
+	assert_eq!(res.common_fields.htlc_minimum_msat, 1);
}

#[test]
@@ -8030,8 +8086,8 @@ fn test_manually_accept_inbound_channel_request() {
		}
		_ => panic!("Unexpected event"),
	}
-
-	nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+	let error_message = "Channel force-closed";
+	nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();

	let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(close_msg_ev.len(), 1);
@@ -8062,11 +8118,11 @@ fn test_manually_reject_inbound_channel_request() {
	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
	// rejecting the inbound channel request.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
+	let error_message = "Channel force-closed";
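+	// The reason string passed to force_close_broadcasting_latest_txn() is, as far as the new
+	// signature suggests, relayed to the counterparty in the resulting `error` message.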
	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
-			nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+			nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
		}
		_ => panic!("Unexpected event"),
	}
@@ -8212,8 +8268,9 @@ fn test_onion_value_mpp_set_calculation() {
	let height = nodes[0].best_block_info().1;
	let session_priv = SecretKey::from_slice(&session_priv).unwrap();
	let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+	let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret);
	let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000,
-		RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
+		&recipient_onion_fields, height + 1, &None).unwrap();
	// Edit amt_to_forward to simulate the sender having set
	// the final amount and the routing node taking less fee
	if let msgs::OutboundOnionPayload::Receive {
@@ -8248,7 +8305,9 @@ fn test_onion_value_mpp_set_calculation() {
	let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);

-	claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
+	claim_payment_along_route(
+		ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage)
+	);
}

fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
@@ -8314,7 +8373,9 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
		pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
	}

-	claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
+	claim_payment_along_route(
+		ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage)
+	);
}

#[test]
@@ -8346,7 +8407,9 @@ fn test_simple_mpp() {
	route.paths[1].hops[0].short_channel_id = chan_2_id;
	route.paths[1].hops[1].short_channel_id = chan_4_id;
	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
-	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+	claim_payment_along_route(
+		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage)
+	);
}

#[test]
@@ -8378,10 +8441,10 @@ fn test_preimage_storage() {
	match events[0] {
		Event::PaymentClaimable { ref purpose, .. } => {
			match &purpose {
-				PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
+				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
					claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
				},
-				_ => panic!("expected PaymentPurpose::InvoicePayment")
+				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
@@ -8644,7 +8707,7 @@ fn test_concurrent_monitor_claim() {
	let height = HTLC_TIMEOUT_BROADCAST + 1;
	connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
	check_closed_broadcast(&nodes[0], 1, true);
-	check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+	check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
		[nodes[1].node.get_our_node_id()], 100000);
	watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
	check_added_monitors(&nodes[0], 1);
@@ -8789,7 +8852,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
		force_closing_node = 1;
		counterparty_node = 0;
	}
-	nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+	let error_message = "Channel force-closed";
+	nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap();
	check_closed_broadcast!(nodes[force_closing_node], true);
	check_added_monitors!(nodes[force_closing_node], 1);
	check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
@@ -8952,7 +9016,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {

	// Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
	// `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
-	open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
+	open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;

	// Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
	// `temporary_channel_id` as they are from different peers.
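	// (Channels are keyed by the counterparty's node id in addition to the channel id, so the
	// two temporary ids only need to be unique per peer.)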
@@ -8963,7 +9027,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
		match &events[0] {
			MessageSendEvent::SendAcceptChannel { node_id, msg } => {
				assert_eq!(node_id, &nodes[1].node.get_our_node_id());
-				assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+				assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
			},
			_ => panic!("Unexpected event"),
		}
@@ -8976,7 +9040,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
		match &events[0] {
			MessageSendEvent::SendAcceptChannel { node_id, msg } => {
				assert_eq!(node_id, &nodes[2].node.get_our_node_id());
-				assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+				assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
			},
			_ => panic!("Unexpected event"),
		}
@@ -9096,11 +9160,11 @@ fn test_duplicate_funding_err_in_funding() {

	nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-	let node_c_temp_chan_id = open_chan_msg.temporary_channel_id;
-	open_chan_msg.temporary_channel_id = real_channel_id;
+	let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
+	open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
	nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
	let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
-	accept_chan_msg.temporary_channel_id = node_c_temp_chan_id;
+	accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
	nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);

	// Now that we have a second channel with the same funding txo, send a bogus funding message
@@ -9158,7 +9222,7 @@ fn test_duplicate_chan_id() {
				// first (valid) and second (invalid) channels are closed, given they both have
				// the same non-temporary channel_id. However, currently we do not, so we just
				// move forward with it.
-				assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+				assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
				assert_eq!(node_id, nodes[0].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
@@ -9192,7 +9256,7 @@ fn test_duplicate_chan_id() {
	// First try to open a second channel with a temporary channel id equal to the txid-based one.
	// Technically this is allowed by the spec, but we don't support it and there's little reason
	// to. Still, it shouldn't cause any other issues.
-	open_chan_msg.temporary_channel_id = channel_id;
+	open_chan_msg.common_fields.temporary_channel_id = channel_id;
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
	{
		let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -9201,7 +9265,7 @@
			MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
				// Technically, at this point, nodes[1] would be justified in thinking both
				// channels are closed, but currently we do not, so we just move forward with it.
-				assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+				assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
				assert_eq!(node_id, nodes[0].node.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
@@ -9222,7 +9286,7 @@ fn test_duplicate_chan_id() {
		// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
		// try to create another channel. Instead, we drop the channel entirely here (leaving the
		// channelmanager in a possibly nonsense state instead).
-		match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+		match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
			ChannelPhase::UnfundedOutboundV1(mut chan) => {
				let logger = test_utils::TestLogger::new();
				chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
@@ -9377,7 +9441,7 @@ fn test_invalid_funding_tx() {
	let wit_program_script: ScriptBuf = wit_program.into();
	for output in tx.output.iter_mut() {
		// Make the confirmed funding transaction have a bogus script_pubkey
-		output.script_pubkey = ScriptBuf::new_v0_p2wsh(&wit_program_script.wscript_hash());
+		output.script_pubkey = ScriptBuf::new_p2wsh(&wit_program_script.wscript_hash());
	}

	nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
@@ -9415,7 +9479,7 @@
	// long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
	// as it's not 32 bytes long.
	let mut spend_tx = Transaction {
-		version: 2i32, lock_time: LockTime::ZERO,
+		version: Version::TWO, lock_time: LockTime::ZERO,
		input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
			previous_output: BitcoinOutPoint {
				txid: tx.txid(),
@@ -9426,7 +9490,7 @@
			witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness())
		}).collect(),
		output: vec![TxOut {
-			value: 1000,
+			value: Amount::from_sat(1000),
			script_pubkey: ScriptBuf::new(),
		}]
	};
@@ -9522,8 +9586,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
	let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-
-	nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+	let error_message = "Channel force-closed";
+	nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
	check_added_monitors!(nodes[1], 1);
@@ -9790,7 +9854,9 @@ fn test_inconsistent_mpp_params() {
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);

-	do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
+	do_claim_payment_along_route(
+		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage)
+	);
	expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
}

@@ -9862,7 +9928,7 @@ enum ExposureEvent {
	AtUpdateFeeOutbound,
}

-fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) {
	// Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
	// policy.
	//
@@ -9877,22 +9943,43 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut config = test_default_channel_config();
+
+	// We hard-code the feerate values here, but they're re-calculated further down and asserted.
+	// If the values below ever change, these constants should simply be updated.
+	const AT_FEE_OUTBOUND_HTLCS: u64 = 20;
+	let nondust_htlc_count_in_limit =
+		if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
+			AT_FEE_OUTBOUND_HTLCS
+		} else { 0 };
+	let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 };
+	let expected_dust_buffer_feerate = initial_feerate + 2530;
+	let mut commitment_tx_cost = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty());
+	commitment_tx_cost +=
+		if on_holder_tx {
+			htlc_success_tx_weight(&ChannelTypeFeatures::empty())
+		} else {
+			htlc_timeout_tx_weight(&ChannelTypeFeatures::empty())
+		} * (initial_feerate as u64 - 253) / 1000 * nondust_htlc_count_in_limit;
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock = initial_feerate;
+	}
	config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
		// Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
		// to get roughly the same initial value as the default setting when this test was
		// originally written.
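		// 5_000_000 / 253 = 19_762, and 19_762 * 253 = 4_999_786 msat, so both branches still
		// start from roughly that original 5_000_000 msat figure. `commitment_tx_cost` is then
		// added on top because the transaction fees of up to `AT_FEE_OUTBOUND_HTLCS` non-dust
		// HTLCs now count towards the exposure and would otherwise eat into the budget.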
-		MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
-	} else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
+		MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost) / 253)
+	} else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost) };
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
	let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-	open_channel.max_htlc_value_in_flight_msat = 50_000_000;
-	open_channel.max_accepted_htlcs = 60;
+	open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
+	open_channel.common_fields.max_accepted_htlcs = 60;
	if on_holder_tx {
-		open_channel.dust_limit_satoshis = 546;
+		open_channel.common_fields.dust_limit_satoshis = 546;
	}
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
	let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -9926,6 +10013,11 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
	let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
	update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);

+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock = 253;
+	}
+
	// Fetch a route in advance as we will be unable to once we're unable to send.
	let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
@@ -9935,16 +10027,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
		(chan.context().get_dust_buffer_feerate(None) as u64,
-		chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+		chan.context().get_max_dust_htlc_exposure_msat(253))
	};
-	let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+	assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64);
+	let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
	let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
-	let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+	// Subtract 3 sats for the multiplier and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
+	// This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
+	// while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
+	let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
	let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+	// This test was written with a fixed dust value here, which we retain, but assert that it is,
+	// indeed, dust on both transactions.
	let dust_htlc_on_counterparty_tx: u64 = 4;
-	let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+	let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000;
+	let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
+	assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat);
+	assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat);

	if on_holder_tx {
		if dust_outbound_balance {
@@ -10014,7 +10115,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
			// Outbound dust balance: 5200 sats
			nodes[0].logger.assert_log("lightning::ln::channel",
				format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-					dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
+					dust_htlc_on_counterparty_tx_msat * dust_htlc_on_counterparty_tx + commitment_tx_cost + 4,
					max_dust_htlc_exposure_msat), 1);
		}
	} else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
@@ -10022,7 +10123,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
		// For the multiplier dust exposure limit, since it scales with feerate,
		// we need to add a lot of HTLCs that will become dust at the new feerate
		// to cross the threshold.
-		for _ in 0..20 {
+		for _ in 0..AT_FEE_OUTBOUND_HTLCS {
			let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
			nodes[0].node.send_payment_with_route(&route, payment_hash,
				RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
@@ -10041,27 +10142,123 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
	added_monitors.clear();
}

-fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
-	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
-	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) {
+	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
+	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee);
+	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
+	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
+	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
+	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee);
+	do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee);
+	do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee);
+	if !multiplier_dust_limit && !apply_excess_fee {
+		// Because non-dust HTLC transaction fees are included in the dust exposure, trying to
+		// increase the fee to hit a higher dust exposure with a
+		// `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these
+		// in the `multiplier_dust_limit` case.
+		do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
+		do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
+		do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee);
+		do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee);
+	}
}

#[test]
fn test_max_dust_htlc_exposure() {
-	do_test_max_dust_htlc_exposure_by_threshold_type(false);
-	do_test_max_dust_htlc_exposure_by_threshold_type(true);
+	do_test_max_dust_htlc_exposure_by_threshold_type(false, false);
+	do_test_max_dust_htlc_exposure_by_threshold_type(false, true);
+	do_test_max_dust_htlc_exposure_by_threshold_type(true, false);
+	do_test_max_dust_htlc_exposure_by_threshold_type(true, true);
}

+#[test]
+fn test_nondust_htlc_fees_are_dust() {
+	// Test that the transaction fees paid in nondust HTLCs count towards our dust limit
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+	let mut config = test_default_channel_config();
+	// Set the dust limit to the default value
+	config.channel_config.max_dust_htlc_exposure =
+		MaxDustHTLCExposure::FeeRateMultiplier(10_000);
+	// Make sure the HTLC limits don't get in the way
+	config.channel_handshake_limits.min_max_accepted_htlcs = 400;
+	config.channel_handshake_config.our_max_accepted_htlcs = 400;
+	config.channel_handshake_config.our_htlc_minimum_msat = 1;
+
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	// Create a channel from 1 -> 0 but immediately push all of the funds towards 0
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 1, 0).2;
+	while nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat > 0 {
+		send_payment(&nodes[1], &[&nodes[0]], nodes[1].node.list_channels()[0].next_outbound_htlc_limit_msat);
+	}
+
+	// First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs
+	// repeatedly until we run out of space.
+	const HTLC_VALUE: u64 = 1_000_000; // Doesn't matter, tune until the test passes
+	let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE).0;
+
+	while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 {
+		route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE);
+	}
+	assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0,
+		"We don't want to run out of ability to send because of some non-dust limit");
+	assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10,
+		"We should be able to fill our dust limit without too many HTLCs");
+
+	let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat;
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+	assert_eq!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0,
+		"Make sure we are able to send once we clear one HTLC");
+
+	// At this point we have somewhere between dust_limit and dust_limit * 2 left in our dust
+	// exposure limit, and we want to max that out using non-dust HTLCs.
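+	// (At 253 sat/kW, a claim transaction's weight in WU times the feerate happens to give the
+	// fee directly in msat: the /1000 for kW and the *1000 for sat -> msat cancel. For the
+	// non-anchor HTLC-success weight of 703 WU that is 703 * 253 = 177_859 msat per HTLC.)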
+	let commitment_tx_per_htlc_cost =
+		htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * 253;
+	let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost;
+	assert!(max_htlcs_remaining < 30,
+		"We should be able to fill our dust limit without too many HTLCs");
+	for i in 0..max_htlcs_remaining + 1 {
+		assert_ne!(i, max_htlcs_remaining);
+		if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat < dust_limit {
+			// We found our limit, and it was less than max_htlcs_remaining!
+			// At this point we can only send dust HTLCs as any non-dust HTLCs will overuse our
+			// remaining dust exposure.
+			break;
+		}
+		route_payment(&nodes[0], &[&nodes[1]], dust_limit * 2);
+	}
+
+	// At this point non-dust HTLCs are no longer accepted from node 0 -> 1; we also check that
+	// such HTLCs can't be routed over the same channel either.
+	create_announced_chan_between_nodes(&nodes, 2, 0);
+	let (route, payment_hash, _, payment_secret) =
+		get_route_and_payment_hash!(nodes[2], nodes[1], dust_limit * 2);
+	let onion = RecipientOnionFields::secret_only(payment_secret);
+	nodes[2].node.send_payment_with_route(&route, payment_hash, onion, PaymentId([0; 32])).unwrap();
+	check_added_monitors(&nodes[2], 1);
+	let send = SendEvent::from_node(&nodes[2]);
+
+	nodes[0].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send.msgs[0]);
+	commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
+
+	expect_pending_htlcs_forwardable!(nodes[0]);
+	check_added_monitors(&nodes[0], 1);
+	let node_id_1 = nodes[1].node.get_our_node_id();
+	expect_htlc_handling_failed_destinations!(
+		nodes[0].node.get_and_clear_pending_events(),
+		&[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }]
+	);
+
+	let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id());
+	nodes[2].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false);
+	expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new());
+}
+
+
#[test]
fn test_non_final_funding_tx() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -10075,7 +10272,7 @@ fn test_non_final_funding_tx() {
	let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
-	let best_height = nodes[0].node.best_block.read().unwrap().height();
+	let best_height = nodes[0].node.best_block.read().unwrap().height;
	let chan_id = *nodes[0].network_chan_count.borrow();

	let events = nodes[0].node.get_and_clear_pending_events();
@@ -10084,8 +10281,8 @@ fn test_non_final_funding_tx() {
	let mut tx = match events[0] {
		Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
			// Timelock the transaction _beyond_ the best client height + 1.
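			// A locktime beyond the next block's height cannot be final when the funding is
			// broadcast, so the channel manager rejects it below as a misuse error.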
-			Transaction { version: chan_id as i32, lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
-				value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+			Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut {
+				value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
			}]}
		},
		_ => panic!("Unexpected event"),
@@ -10097,14 +10294,9 @@ fn test_non_final_funding_tx() {
		},
		_ => panic!()
	}
-	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 1);
-	match events[0] {
-		Event::ChannelClosed { channel_id, .. } => {
-			assert_eq!(channel_id, temp_channel_id);
-		},
-		_ => panic!("Unexpected event"),
-	}
+	let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned();
+	check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]);
+	assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel");
}

#[test]
@@ -10120,7 +10312,7 @@ fn test_non_final_funding_tx_within_headroom() {
	let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
-	let best_height = nodes[0].node.best_block.read().unwrap().height();
+	let best_height = nodes[0].node.best_block.read().unwrap().height;
	let chan_id = *nodes[0].network_chan_count.borrow();

	let events = nodes[0].node.get_and_clear_pending_events();
@@ -10129,8 +10321,8 @@ fn test_non_final_funding_tx_within_headroom() {
	let mut tx = match events[0] {
		Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
			// Timelock the transaction within a +1 headroom from the best block.
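			// A locktime of best_height + 1 can be satisfied by the very next block, so this
			// funding transaction is accepted.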
-			Transaction { version: chan_id as i32, lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
-				value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+			Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut {
+				value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(),
			}]}
		},
		_ => panic!("Unexpected event"),
@@ -10509,6 +10701,90 @@ fn test_remove_expired_inbound_unfunded_channels() {
	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
}

+#[test]
+fn test_channel_close_when_not_timely_accepted() {
+	// Create network of two nodes
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Simulate peer-disconnects mid-handshake:
+	// the channel is initiated from the node 0 side,
+	// but the nodes disconnect before node 1 could send accept_channel.
+	let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+	assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+	// Since the channel was inbound from node[1]'s perspective, it should have been dropped immediately.
+	assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+	// In the meantime, some time passes.
+	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+		nodes[0].node.timer_tick_occurred();
+	}
+
+	// Since we disconnected from the peer and did not reconnect in time,
+	// we should have force-closed the channel by now.
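+	// (UNFUNDED_CHANNEL_AGE_LIMIT_TICKS bounds the number of timer ticks an unfunded channel
+	// may wait on its counterparty before we give up on it.)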
+	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+	assert_eq!(nodes[0].node.list_channels().len(), 0);
+
+	{
+		// Since the accept_channel message was never received,
+		// the channel should have been force-closed by now from node 0's side
+		// and the peer removed from per_peer_state.
+		let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+		assert_eq!(node_0_per_peer_state.len(), 0);
+	}
+}
+
+#[test]
+fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
+	// Create network of two nodes
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Simulate peer-disconnects mid-handshake:
+	// the channel is initiated from the node 0 side,
+	// but the nodes disconnect before node 1 could send accept_channel.
+	let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+	assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+	// Since the channel was inbound from node[1]'s perspective, it should have been immediately dropped.
+	assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+	// The peers now reconnect.
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+
+	// Make sure the SendOpenChannel message is added to node_0 pending message events.
+	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match &msg_events[0] {
+		MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
+		_ => panic!("Unexpected message."),
+	}
+}
+
fn do_test_multi_post_event_actions(do_reload: bool) {
	// Tests handling multiple post-Event actions at once.
	// There is specific code in ChannelManager to handle channels where multiple post-Event
@@ -10665,7 +10941,9 @@ fn test_batch_channel_open() {
}

#[test]
-fn test_disconnect_in_funding_batch() {
+fn test_close_in_funding_batch() {
+	// This test ensures that if one of the channels
+	// in the batch closes, the complete batch will close.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -10689,14 +10967,39 @@ fn test_close_in_funding_batch() {

	// The transaction should not have been broadcast before all channels are ready.
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);

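	// Channels opened as a batch share a single funding transaction, which must not hit the
	// chain until every channel in the batch is fully signed; aborting any one channel
	// therefore has to abort the entire batch.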
-	// The remaining peer in the batch disconnects.
-	nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
-
-	// The channels in the batch will close immediately.
+	// Force-close the channel for which we've completed the initial monitor.
	let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
+	let error_message = "Channel force-closed";
+	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
+
+	// The monitor should become closed.
+	check_added_monitors(&nodes[0], 1);
+	{
+		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+		assert_eq!(monitor_updates_1.len(), 1);
+		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+	}
+
+	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+	match msg_events[0] {
+		MessageSendEvent::HandleError { .. } => (),
+		_ => panic!("Unexpected message."),
+	}
+
+	// We broadcast the commitment transaction as part of the force-close.
+	{
+		let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+		assert_eq!(broadcasted_txs.len(), 1);
+		assert!(broadcasted_txs[0].txid() != tx.txid());
+		assert_eq!(broadcasted_txs[0].input.len(), 1);
+		assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+	}
+
+	// All channels in the batch should close immediately.
	check_closed_events(&nodes[0], &[
		ExpectedCloseEvent {
			channel_id: Some(channel_id_1),
@@ -10714,19 +11017,6 @@
		},
	]);

-	// The monitor should become closed.
-	check_added_monitors(&nodes[0], 1);
-	{
-		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
-		let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
-		assert_eq!(monitor_updates_1.len(), 1);
-		assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
-	}
-
-	// The funding transaction should not have been broadcast, and therefore, we don't need
-	// to broadcast a force-close transaction for the closed monitor.
-	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
-
	// Ensure the channels don't exist anymore.
	assert!(nodes[0].node.list_channels().is_empty());
}

@@ -10771,7 +11061,8 @@ fn test_batch_funding_close_after_funding_signed() {
	let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
	let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
	let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
-	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+	let error_message = "Channel force-closed";
+	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
	check_added_monitors(&nodes[0], 2);
	{
		let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
@@ -10840,12 +11131,12 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
	} else {
		(&nodes[0], &nodes[1])
	};
-
-	closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
+	let error_message = "Channel force-closed";
+	closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id(), error_message.to_string()).unwrap();
	let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 1);
	match msg_events.pop().unwrap() {
-		MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
+		MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { .. }, .. } => {},
		_ => panic!("Unexpected event"),
	}
	check_added_monitors(closing_node, 1);
@@ -10875,3 +11166,36 @@ fn test_funding_and_commitment_tx_confirm_same_block() {
	do_test_funding_and_commitment_tx_confirm_same_block(false);
	do_test_funding_and_commitment_tx_confirm_same_block(true);
}
+
+#[test]
+fn test_accept_inbound_channel_errors_queued() {
+	// For manually accepted inbound channels, test that a close error is correctly handled
+	// and the channel fails back to the initiator.
+	let mut config0 = test_default_channel_config();
+	let mut config1 = config0.clone();
+	config1.channel_handshake_limits.their_to_self_delay = 1000;
+	config1.manually_accept_inbound_channels = true;
+	config0.channel_handshake_config.our_to_self_delay = 2000;
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+	let events = nodes[1].node.get_and_clear_pending_events();
+	match events[0] {
+		Event::OpenChannelRequest { temporary_channel_id, .. } => {
+			match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
+				Err(APIError::ChannelUnavailable { err: _ }) => (),
+				_ => panic!(),
+			}
+		}
+		_ => panic!("Unexpected event"),
+	}
+	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+		open_channel_msg.common_fields.temporary_channel_id);
+}