X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=56586b0a29c569be57700a316e109e1bb76edb24;hb=783e8188a7bd6470d5926bf5d1bae1350996801a;hp=7e2532f1f4f6774774dfca88914e38cf0e4e6fd6;hpb=be6f263825e0c75d32d6d48fd5dff9986ca6b011;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 7e2532f1..56586b0a 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -18,6 +18,7 @@ use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
 use crate::chain::keysinterface::{ChannelSigner, EcdsaChannelSigner, EntropySource};
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
@@ -31,9 +32,9 @@ use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
 use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
+use crate::util::string::UntrustedString;
 use crate::util::config::UserConfig;
 
 use bitcoin::hash_types::BlockHash;
@@ -94,7 +95,7 @@ fn test_insane_channel_opens() {
 		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
 			match action {
 				&ErrorAction::SendErrorMessage { .. } => {
-					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), expected_regex, 1);
+					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
 				},
 				_ => panic!("unexpected event!"),
 			}
@@ -1118,7 +1119,7 @@ fn holding_cell_htlc_counting() {
 		unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)), true, APIError::ChannelUnavailable { ref err },
 			assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
 	}
 
 	// This should also be true if we try to forward a payment.
@@ -1346,7 +1347,7 @@ fn test_basic_channel_reserve() {
 		_ => panic!("Unexpected error variant"),
 	}
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1);
 
 	send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
 }
@@ -1811,7 +1812,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
 		unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
 			assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+		nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
 	}
 
 	// channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
@@ -1906,7 +1907,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
 		unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
 			assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 2);
+		nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2);
 	}
 
 	let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
@@ -2730,20 +2731,22 @@ fn test_htlc_on_chain_success() {
 	}
 	let chan_id = Some(chan_1.2);
 	match forwarded_events[1] {
-		Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
+		Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
 			assert_eq!(fee_earned_msat, Some(1000));
 			assert_eq!(prev_channel_id, chan_id);
 			assert_eq!(claim_from_onchain_tx, true);
 			assert_eq!(next_channel_id, Some(chan_2.2));
+			assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
 		},
 		_ => panic!()
 	}
 	match forwarded_events[2] {
-		Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
+		Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
 			assert_eq!(fee_earned_msat, Some(1000));
 			assert_eq!(prev_channel_id, chan_id);
 			assert_eq!(claim_from_onchain_tx, true);
 			assert_eq!(next_channel_id, Some(chan_2.2));
+			assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
 		},
 		_ => panic!()
 	}
@@ -2842,7 +2845,7 @@ fn test_htlc_on_chain_success() {
 	assert_eq!(commitment_spend.input.len(), 2);
 	assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 	assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
-	assert_eq!(commitment_spend.lock_time.0, 0);
+	assert_eq!(commitment_spend.lock_time.0, nodes[1].best_block_info().1 + 1);
 	assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
 	// We don't bother to check that B can claim the HTLC output on its commitment tx here as
 	// we already checked the same situation with A.
@@ -3170,7 +3173,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 	let events = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), if deliver_bs_raa { 2 + nodes.len() - 1 } else { 3 + nodes.len() });
+	assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
 	match events[0] {
 		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
 		_ => panic!("Unexepected event"),
 	}
@@ -3181,20 +3184,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 		},
 		_ => panic!("Unexpected event"),
 	}
-	if !deliver_bs_raa {
-		match events[2] {
-			Event::PendingHTLCsForwardable { .. } => { },
-			_ => panic!("Unexpected event"),
-		};
-		nodes[1].node.abandon_payment(PaymentId(fourth_payment_hash.0));
-		let payment_failed_events = nodes[1].node.get_and_clear_pending_events();
-		assert_eq!(payment_failed_events.len(), 1);
-		match payment_failed_events[0] {
-			Event::PaymentFailed { ref payment_hash, .. } => {
-				assert_eq!(*payment_hash, fourth_payment_hash);
-			},
-			_ => panic!("Unexpected event"),
-		}
+	match events[2] {
+		Event::PaymentFailed { ref payment_hash, .. } => {
+			assert_eq!(*payment_hash, fourth_payment_hash);
+		},
+		_ => panic!("Unexpected event"),
 	}
 
 	nodes[1].node.process_pending_htlc_forwards();
@@ -3242,29 +3236,45 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
 
 	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 3);
+	assert_eq!(events.len(), 6);
 	match events[0] {
-		Event::PaymentPathFailed { ref payment_hash, ref network_update, .. } => {
+		Event::PaymentPathFailed { ref payment_hash, ref failure, .. } => {
 			assert!(failed_htlcs.insert(payment_hash.0));
 			// If we delivered B's RAA we got an unknown preimage error, not something
 			// that we should update our routing table for.
 			if !deliver_bs_raa {
-				assert!(network_update.is_some());
+				if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") }
 			}
 		},
 		_ => panic!("Unexpected event"),
 	}
 	match events[1] {
-		Event::PaymentPathFailed { ref payment_hash, ref network_update, .. } => {
-			assert!(failed_htlcs.insert(payment_hash.0));
-			assert!(network_update.is_some());
+		Event::PaymentFailed { ref payment_hash, .. } => {
+			assert_eq!(*payment_hash, first_payment_hash);
 		},
 		_ => panic!("Unexpected event"),
 	}
 	match events[2] {
-		Event::PaymentPathFailed { ref payment_hash, ref network_update, .. } => {
+		Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
 			assert!(failed_htlcs.insert(payment_hash.0));
-			assert!(network_update.is_some());
+		},
+		_ => panic!("Unexpected event"),
+	}
+	match events[3] {
+		Event::PaymentFailed { ref payment_hash, .. } => {
+			assert_eq!(*payment_hash, second_payment_hash);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	match events[4] {
+		Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => {
+			assert!(failed_htlcs.insert(payment_hash.0));
+		},
+		_ => panic!("Unexpected event"),
+	}
+	match events[5] {
+		Event::PaymentFailed { ref payment_hash, .. } => {
+			assert_eq!(*payment_hash, third_payment_hash);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -3354,7 +3364,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
 		nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
 	}
 	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 2);
+	assert_eq!(events.len(), 3);
 	// Check that Alice fails backward the pending HTLC from the second payment.
 	match events[0] {
 		Event::PaymentPathFailed { payment_hash, .. } => {
@@ -3363,6 +3373,12 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
 		_ => panic!("Unexpected event"),
 	}
 	match events[1] {
+		Event::PaymentFailed { payment_hash, .. } => {
+			assert_eq!(payment_hash, failed_payment_hash);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	match events[2] {
 		Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => {
 			assert_eq!(err, "Remote side tried to send a 0-msat HTLC");
 		},
@@ -3594,7 +3610,7 @@ fn test_simple_peer_disconnect() {
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
 	{
 		let events = nodes[0].node.get_and_clear_pending_events();
-		assert_eq!(events.len(), 3);
+		assert_eq!(events.len(), 4);
 		match events[0] {
 			Event::PaymentSent { payment_preimage, payment_hash, .. } => {
 				assert_eq!(payment_preimage, payment_preimage_3);
@@ -3603,14 +3619,20 @@ fn test_simple_peer_disconnect() {
 			_ => panic!("Unexpected event"),
 		}
 		match events[1] {
+			Event::PaymentPathSuccessful { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		match events[2] {
 			Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
 				assert_eq!(payment_hash, payment_hash_5);
 				assert!(payment_failed_permanently);
 			},
 			_ => panic!("Unexpected event"),
 		}
-		match events[2] {
-			Event::PaymentPathSuccessful { .. } => {},
+		match events[3] {
+			Event::PaymentFailed { payment_hash, .. } => {
+				assert_eq!(payment_hash, payment_hash_5);
+			},
 			_ => panic!("Unexpected event"),
 		}
 	}
@@ -3958,10 +3980,10 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
 
@@ -4064,7 +4086,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
 		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
 		let payment_id = PaymentId([42; 32]);
 		let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &route).unwrap();
-		nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+		nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
 		check_added_monitors!(nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
@@ -4633,11 +4655,12 @@ fn test_onchain_to_onchain_claim() {
 		_ => panic!("Unexpected event"),
 	}
 	match events[1] {
-		Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id } => {
+		Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
 			assert_eq!(fee_earned_msat, Some(1000));
 			assert_eq!(prev_channel_id, Some(chan_1.2));
 			assert_eq!(claim_from_onchain_tx, true);
 			assert_eq!(next_channel_id, Some(chan_2.2));
+			assert_eq!(outbound_amount_forwarded_msat, Some(3000000));
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -4679,7 +4702,7 @@ fn test_onchain_to_onchain_claim() {
 	check_spends!(b_txn[0], commitment_tx[0]);
 	assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 	assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-	assert_eq!(b_txn[0].lock_time.0, 0); // Success tx
+	assert_eq!(b_txn[0].lock_time.0, nodes[1].best_block_info().1 + 1); // Success tx
 
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
@@ -5123,20 +5146,21 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 	}
 
 	let as_events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(as_events.len(), if announce_latest { 5 } else { 3 });
+	assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
 	let mut as_failds = HashSet::new();
 	let mut as_updates = 0;
 	for event in as_events.iter() {
-		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref network_update, .. } = event {
+		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
 			assert!(as_failds.insert(*payment_hash));
 			if *payment_hash != payment_hash_2 {
 				assert_eq!(*payment_failed_permanently, deliver_last_raa);
 			} else {
 				assert!(!payment_failed_permanently);
 			}
-			if network_update.is_some() {
+			if let PathFailure::OnPath { network_update: Some(_) } = failure {
 				as_updates += 1;
 			}
+		} else if let &Event::PaymentFailed { .. } = event {
 		} else { panic!("Unexpected event"); }
 	}
 	assert!(as_failds.contains(&payment_hash_1));
@@ -5148,20 +5172,21 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 	assert!(as_failds.contains(&payment_hash_6));
 
 	let bs_events = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(bs_events.len(), if announce_latest { 4 } else { 3 });
+	assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
 	let mut bs_failds = HashSet::new();
 	let mut bs_updates = 0;
 	for event in bs_events.iter() {
-		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref network_update, .. } = event {
+		if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
 			assert!(bs_failds.insert(*payment_hash));
 			if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
 				assert_eq!(*payment_failed_permanently, deliver_last_raa);
 			} else {
 				assert!(!payment_failed_permanently);
 			}
-			if network_update.is_some() {
+			if let PathFailure::OnPath { network_update: Some(_) } = failure {
 				bs_updates += 1;
 			}
+		} else if let &Event::PaymentFailed { .. } = event {
 		} else { panic!("Unexpected event"); }
 	}
 	assert!(bs_failds.contains(&payment_hash_1));
@@ -5257,7 +5282,7 @@ fn test_key_derivation_params() {
 	let seed = [42; 32];
 	let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
 	let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
-	let network_graph = Arc::new(NetworkGraph::new(chanmon_cfgs[0].chain_source.genesis_hash, &chanmon_cfgs[0].logger));
+	let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
 	let scorer = Mutex::new(test_utils::TestScorer::new());
 	let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
 	let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
@@ -5670,18 +5695,22 @@ fn test_fail_holding_cell_htlc_upon_free() {
 
 	// Check that the payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 1);
+	assert_eq!(events.len(), 2);
 	match &events[0] {
-		&Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, ref network_update, ref all_paths_failed, ref short_channel_id, .. } => {
+		&Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
 			assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap());
 			assert_eq!(our_payment_hash.clone(), *payment_hash);
 			assert_eq!(*payment_failed_permanently, false);
-			assert_eq!(*all_paths_failed, true);
-			assert_eq!(*network_update, None);
 			assert_eq!(*short_channel_id, Some(route.paths[0][0].short_channel_id));
 		},
 		_ => panic!("Unexpected event"),
 	}
+	match &events[1] {
+		&Event::PaymentFailed { ref payment_hash, .. } => {
+			assert_eq!(our_payment_hash.clone(), *payment_hash);
+		},
+		_ => panic!("Unexpected event"),
+	}
 }
 
 // Test that if multiple HTLCs are released from the holding cell and one is
@@ -5755,18 +5784,22 @@ fn test_free_and_fail_holding_cell_htlcs() {
 
 	// Check that the second payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 1);
+	assert_eq!(events.len(), 2);
 	match &events[0] {
-		&Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, ref network_update, ref all_paths_failed, ref short_channel_id, .. } => {
+		&Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => {
 			assert_eq!(payment_id_2, *payment_id.as_ref().unwrap());
 			assert_eq!(payment_hash_2.clone(), *payment_hash);
 			assert_eq!(*payment_failed_permanently, false);
-			assert_eq!(*all_paths_failed, true);
-			assert_eq!(*network_update, None);
 			assert_eq!(*short_channel_id, Some(route_2.paths[0][0].short_channel_id));
 		},
 		_ => panic!("Unexpected event"),
 	}
+	match &events[1] {
+		&Event::PaymentFailed { ref payment_hash, .. } => {
+			assert_eq!(payment_hash_2.clone(), *payment_hash);
+		},
+		_ => panic!("Unexpected event"),
+	}
 
 	// Complete the first payment and the RAA from the fee update.
 	let (payment_event, send_raa_event) = {
@@ -5958,7 +5991,7 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
 		assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1);
 }
 
 #[test]
@@ -5976,7 +6009,7 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
 		assert_eq!(err, "Cannot send 0-msat HTLC"));
 
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
 }
 
 #[test]
@@ -6059,7 +6092,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
 		assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
 
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
 }
 
 #[test]
@@ -6083,7 +6116,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
 		assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
 
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
 
 	send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
 }
@@ -6259,10 +6292,10 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	//Disconnect and Reconnect
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
@@ -6649,13 +6682,12 @@ fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 	}
 
 	let events_5 = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events_5.len(), 1);
+	assert_eq!(events_5.len(), 2);
 
 	// Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between
 	// the node originating the error to its next hop.
 	match events_5[0] {
-		Event::PaymentPathFailed { network_update:
-			Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }), error_code, ..
+		Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, ..
 		} => {
 			assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id);
 			assert!(is_permanent);
@@ -6663,6 +6695,12 @@ fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 		},
 		_ => panic!("Unexpected event"),
 	}
+	match events_5[1] {
+		Event::PaymentFailed { payment_hash, .. } => {
+			assert_eq!(payment_hash, our_payment_hash);
+		},
+		_ => panic!("Unexpected event"),
+	}
 
 	// TODO: Test actual removal of channel from NetworkGraph when it's implemented.
 }
@@ -6734,7 +6772,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 	let events = nodes[0].node.get_and_clear_pending_events();
 	// Only 2 PaymentPathFailed events should show up, over-dust HTLC has to be failed by timeout tx
-	assert_eq!(events.len(), 2);
+	assert_eq!(events.len(), 4);
 	let mut first_failed = false;
 	for event in events {
 		match event {
@@ -6745,7 +6783,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 				} else {
 					assert_eq!(payment_hash, payment_hash_2);
 				}
-			}
+			},
+			Event::PaymentFailed { .. } => {}
 			_ => panic!("Unexpected event"),
 		}
 	}
@@ -6824,7 +6863,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 		if !revoked {
 			assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 		} else {
-			assert_eq!(timeout_tx[0].lock_time.0, 0);
+			assert_eq!(timeout_tx[0].lock_time.0, 12);
 		}
 		// We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
 		mine_transaction(&nodes[0], &timeout_tx[0]);
@@ -7011,10 +7050,10 @@ fn test_announce_disable_channels() {
 		}
 	}
 	// Reconnect peers
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 3);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 3);
 
@@ -7890,6 +7929,171 @@ fn test_can_not_accept_unknown_inbound_channel() {
 	}
 }
 
+#[test]
+fn test_onion_value_mpp_set_calculation() {
+	// Test that we use the onion value `amt_to_forward` when
+	// calculating whether we've reached the `total_msat` of an MPP
+	// by having a routing node forward more than `amt_to_forward`
+	// and checking that the receiving node doesn't generate
+	// a PaymentClaimable event too early
+	let node_count = 4;
+	let chanmon_cfgs = create_chanmon_cfgs(node_count);
+	let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
+	let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
+
+	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
+	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
+
+	let total_msat = 100_000;
+	let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
+	let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat);
+	let sample_path = route.paths.pop().unwrap();
+
+	let mut path_1 = sample_path.clone();
+	path_1[0].pubkey = nodes[1].node.get_our_node_id();
+	path_1[0].short_channel_id = chan_1_id;
+	path_1[1].pubkey = nodes[3].node.get_our_node_id();
+	path_1[1].short_channel_id = chan_3_id;
+	path_1[1].fee_msat = 100_000;
+	route.paths.push(path_1);
+
+	let mut path_2 = sample_path.clone();
+	path_2[0].pubkey = nodes[2].node.get_our_node_id();
+	path_2[0].short_channel_id = chan_2_id;
+	path_2[1].pubkey = nodes[3].node.get_our_node_id();
+	path_2[1].short_channel_id = chan_4_id;
+	path_2[1].fee_msat = 1_000;
+	route.paths.push(path_2);
+
+	// Send payment
+	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+	let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &route).unwrap();
+	nodes[0].node.test_send_payment_internal(&route, our_payment_hash, &Some(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
+	check_added_monitors!(nodes[0], expected_paths.len());
+
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), expected_paths.len());
+
+	// First path
+	let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events);
+	let mut payment_event = SendEvent::from_event(ev);
+	let mut prev_node = &nodes[0];
+
+	for (idx, &node) in expected_paths[0].iter().enumerate() {
+		assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
+
+		if idx == 0 { // routing node
+			let session_priv = [3; 32];
+			let height = nodes[0].best_block_info().1;
+			let session_priv = SecretKey::from_slice(&session_priv).unwrap();
+			let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+			let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000, &Some(our_payment_secret), height + 1, &None).unwrap();
+			// Edit amt_to_forward to simulate the sender having set
+			// the final amount and the routing node taking less fee
+			onion_payloads[1].amt_to_forward = 99_000;
+			let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
+			payment_event.msgs[0].onion_routing_packet = new_onion_packet;
+		}
+
+		node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(node, 0);
+		commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(node);
+
+		if idx == 0 {
+			let mut events_2 = node.node.get_and_clear_pending_msg_events();
+			assert_eq!(events_2.len(), 1);
+			check_added_monitors!(node, 1);
+			payment_event = SendEvent::from_event(events_2.remove(0));
+			assert_eq!(payment_event.msgs.len(), 1);
+		} else {
+			let events_2 = node.node.get_and_clear_pending_events();
+			assert!(events_2.is_empty());
+		}
+
+		prev_node = node;
+	}
+
+	// Second path
+	let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events);
+	pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None);
+
+	claim_payment_along_route(&nodes[0], expected_paths, false, our_payment_preimage);
+}
+
+fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) {
+
+	let routing_node_count = msat_amounts.len();
+	let node_count = routing_node_count + 2;
+
+	let chanmon_cfgs = create_chanmon_cfgs(node_count);
+	let node_cfgs = create_node_cfgs(node_count, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]);
+	let nodes = create_network(node_count, &node_cfgs, &node_chanmgrs);
+
+	let src_idx = 0;
+	let dst_idx = 1;
+
+	// Create channels for each amount
+	let mut expected_paths = Vec::with_capacity(routing_node_count);
+	let mut src_chan_ids = Vec::with_capacity(routing_node_count);
+	let mut dst_chan_ids = Vec::with_capacity(routing_node_count);
+	for i in 0..routing_node_count {
+		let routing_node = 2 + i;
+		let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id;
+		src_chan_ids.push(src_chan_id);
+		let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id;
+		dst_chan_ids.push(dst_chan_id);
+		let path = vec![&nodes[routing_node], &nodes[dst_idx]];
+		expected_paths.push(path);
+	}
+	let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect();
+
+	// Create a route for each amount
+	let example_amount = 100000;
+	let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount);
+	let sample_path = route.paths.pop().unwrap();
+	for i in 0..routing_node_count {
+		let routing_node = 2 + i;
+		let mut path = sample_path.clone();
+		path[0].pubkey = nodes[routing_node].node.get_our_node_id();
+		path[0].short_channel_id = src_chan_ids[i];
+		path[1].pubkey = nodes[dst_idx].node.get_our_node_id();
+		path[1].short_channel_id = dst_chan_ids[i];
+		path[1].fee_msat = msat_amounts[i];
+		route.paths.push(path);
+	}
+
+	// Send payment with manually set total_msat
+	let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes());
+	let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &route).unwrap();
+	nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash, &Some(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap();
+	check_added_monitors!(nodes[src_idx], expected_paths.len());
+
+	let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), expected_paths.len());
+	let mut amount_received = 0;
+	for (path_idx, expected_path) in expected_paths.iter().enumerate() {
+		let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
+
+		let current_path_amount = msat_amounts[path_idx];
+		amount_received += current_path_amount;
+		let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat;
+		pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None);
+	}
+
+	claim_payment_along_route(&nodes[src_idx], &expected_paths, false, our_payment_preimage);
+}
+
+#[test]
+fn test_overshoot_mpp() {
+	do_test_overshoot_mpp(&[100_000, 101_000], 200_000);
+	do_test_overshoot_mpp(&[100_000, 10_000, 100_000], 200_000);
+}
+
 #[test]
 fn test_simple_mpp() {
 	// Simple test of sending a multi-path payment.
@@ -8115,12 +8319,13 @@ fn test_update_err_monitor_lockdown() {
 	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
 	let persister = test_utils::TestPersister::new();
 	let watchtower = {
-		let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-		let mut w = test_utils::TestVecWriter(Vec::new());
-		monitor.write(&mut w).unwrap();
-		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-			&mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
-		assert!(new_monitor == *monitor);
+		let new_monitor = {
+			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+			let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+			assert!(new_monitor == *monitor);
+			new_monitor
+		};
 		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 		assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 		watchtower
@@ -8144,7 +8349,7 @@ fn test_update_err_monitor_lockdown() {
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
 		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-		if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+		if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 			assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
 			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
 		} else { assert!(false); }
@@ -8182,12 +8387,13 @@ fn test_concurrent_monitor_claim() {
 	let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
 	let persister = test_utils::TestPersister::new();
 	let watchtower_alice = {
-		let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-		let mut w = test_utils::TestVecWriter(Vec::new());
-		monitor.write(&mut w).unwrap();
-		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-			&mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
-		assert!(new_monitor == *monitor);
+		let new_monitor = {
+			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+			let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+			assert!(new_monitor == *monitor);
+			new_monitor
+		};
 		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 		assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 		watchtower
@@ -8211,12 +8417,13 @@ fn test_concurrent_monitor_claim() {
 	let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
 	let persister = test_utils::TestPersister::new();
 	let watchtower_bob = {
-		let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-		let mut w = test_utils::TestVecWriter(Vec::new());
-		monitor.write(&mut w).unwrap();
-		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-			&mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
-		assert!(new_monitor == *monitor);
+		let new_monitor = {
+			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+			let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+			assert!(new_monitor == *monitor);
+			new_monitor
+		};
 		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 		assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 		watchtower
@@ -8238,7 +8445,7 @@ fn test_concurrent_monitor_claim() {
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
 		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-		if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+		if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 			// Watchtower Alice should already have seen the block and reject the update
 			assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
 			assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
@@ -8306,7 +8513,7 @@ fn test_pre_lockin_no_chan_closed_update() {
 	let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
 	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
 	assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
-	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() }, true);
+	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
 }
 
 #[test]
@@ -8694,9 +8901,9 @@ fn test_duplicate_chan_id() {
 	};
 	check_added_monitors!(nodes[0], 0);
 	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
-	// At this point we'll try to add a duplicate channel monitor, which will be rejected, but
-	// still needs to be cleared here.
-	check_added_monitors!(nodes[1], 1);
+	// At this point we'll look up if the channel_id is present and immediately fail the channel
+	// without trying to persist the `ChannelMonitor`.
+	check_added_monitors!(nodes[1], 0);
 
 	// ...still, nodes[1] will reject the duplicate channel.
 	{
@@ -8764,7 +8971,7 @@ fn test_error_chans_closed() {
 	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
 	check_added_monitors!(nodes[0], 1);
 	check_closed_broadcast!(nodes[0], false);
-	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
+	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 	assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
 	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -8774,7 +8981,7 @@ fn test_error_chans_closed() {
 	let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
 	check_added_monitors!(nodes[0], 2);
-	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() });
+	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
 	match events[0] {
@@ -9032,9 +9239,11 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
 
 		let failure_events = nodes[0].node.get_and_clear_pending_events();
-		assert_eq!(failure_events.len(), 2);
+		assert_eq!(failure_events.len(), 4);
 		if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
-		if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+		if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); }
+		if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); }
+		if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); }
 	} else {
 		// Let the second HTLC fail and claim the first
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
@@ -9045,7 +9254,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 
 		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
 		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
-		expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+		expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
 		claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
 	}
 }
@@ -9090,7 +9299,6 @@ fn test_inconsistent_mpp_params() {
 		if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
 			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
 	});
-	let payment_params_opt = Some(payment_params);
 
 	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
 
@@ -9104,20 +9312,20 @@ fn test_inconsistent_mpp_params() {
 		dup_route.paths.push(route.paths[1].clone());
 		nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &dup_route).unwrap()
 	};
-	{
-		nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
-		check_added_monitors!(nodes[0], 1);
+	nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	{
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
 		pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
 	}
 	assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
 
-	{
-		nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
-		check_added_monitors!(nodes[0], 1);
+	nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	{
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
 		let payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -9160,14 +9368,34 @@ fn test_inconsistent_mpp_params() {
 
 	expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
 
-	nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
+	nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
 
-	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
+	do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 3);
+	match events[0] {
+		Event::PaymentSent { payment_hash, .. } => { // The payment was abandoned earlier, so the fee paid will be None
+			assert_eq!(payment_hash, our_payment_hash);
+		},
+		_ => panic!("Unexpected event")
+	}
+	match events[1] {
+		Event::PaymentPathSuccessful { payment_hash, .. } => {
+			assert_eq!(payment_hash.unwrap(), our_payment_hash);
+		},
+		_ => panic!("Unexpected event")
+	}
+	match events[2] {
+		Event::PaymentPathSuccessful { payment_hash, .. } => {
+			assert_eq!(payment_hash.unwrap(), our_payment_hash);
		},
+		_ => panic!("Unexpected event")
+	}
 }
 
 #[test]
@@ -9184,7 +9412,6 @@ fn test_keysend_payments_to_public_node() {
 	let route_params = RouteParameters {
 		payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
 		final_value_msat: 10000,
-		final_cltv_expiry_delta: 40,
 	};
 	let scorer = test_utils::TestScorer::new();
 	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
@@ -9215,7 +9442,6 @@ fn test_keysend_payments_to_private_node() {
 	let route_params = RouteParameters {
 		payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
 		final_value_msat: 10000,
-		final_cltv_expiry_delta: 40,
 	};
 	let network_graph = nodes[0].network_graph.clone();
 	let first_hops = nodes[0].node.list_usable_channels();
@@ -9445,7 +9671,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 		}
 		nodes[0].node.timer_tick_occurred();
 		check_added_monitors!(nodes[0], 1);
-		nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1);
+		nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
 	}
 
 	let _ = nodes[0].node.get_and_clear_pending_msg_events();