X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fpayment_tests.rs;h=99c8682738da55467b5866f0c8c751047ff2c001;hb=29e34c8a10cf813116c9251188a86978cc97e259;hp=6fcb18cf00bc925a5cb4318c838841ccf339baee;hpb=ec7ccf0415d665441d74edbc479fb9ad357c2751;p=rust-lightning

diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 6fcb18cf..99c86827 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -15,6 +15,7 @@ use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::OutPoint;
 use chain::keysinterface::KeysInterface;
+use ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
@@ -70,7 +71,7 @@ fn retry_single_path_payment() {
 	check_added_monitors!(nodes[1], 1);
 	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
-	expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
 	// Rebalance the channel so the retry succeeds.
 	send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
@@ -173,7 +174,7 @@ fn mpp_retry() {
 	check_added_monitors!(nodes[2], 1);
 	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
-	expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
 	// Rebalance the channel so the second half of the payment can succeed.
 	send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
@@ -251,7 +252,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
 		check_added_monitors!(nodes[1], 1);
 		commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);
 
-		expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
+		expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
 	} else {
 		// Pass half of the payment along the second path.
 		pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
@@ -521,7 +522,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 		confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
 	}
 	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-	expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
 	// Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
 	// reloaded) via a route over the new channel, which work without issue and eventually be
@@ -531,9 +532,19 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
 	// and not the original fee. We also update node[1]'s relevant config as
 	// do_claim_payment_along_route expects us to never overpay.
-	nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_id_2).unwrap()
-		.config.mutable.forwarding_fee_base_msat += 100_000;
-	new_route.paths[0][0].fee_msat += 100_000;
+	{
+		let mut channel_state = nodes[1].node.channel_state.lock().unwrap();
+		let mut channel = channel_state.by_id.get_mut(&chan_id_2).unwrap();
+		let mut new_config = channel.config();
+		new_config.forwarding_fee_base_msat += 100_000;
+		channel.update_config(&new_config);
+		new_route.paths[0][0].fee_msat += 100_000;
+	}
+
+	// Force expiration of the channel's previous config.
+	for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
+		nodes[1].node.timer_tick_occurred();
+	}
 
 	assert!(nodes[0].node.retry_payment(&new_route, payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
 	nodes[0].node.retry_payment(&new_route, payment_id).unwrap();
@@ -573,7 +584,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
 	// nodes[0].
 	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
-	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
@@ -834,3 +845,132 @@ fn get_ldk_payment_preimage() {
 	pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), events.pop().unwrap(), true, Some(payment_preimage));
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, payment_preimage);
 }
+
+#[test]
+fn sent_probe_is_probe_of_sending_node() {
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+	// First check we refuse to build a single-hop probe
+	let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
+	assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err());
+
+	// Then build an actual two-hop probing path
+	let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
+
+	match nodes[0].node.send_probe(route.paths[0].clone()) {
+		Ok((payment_hash, payment_id)) => {
+			assert!(nodes[0].node.payment_is_probe(&payment_hash, &payment_id));
+			assert!(!nodes[1].node.payment_is_probe(&payment_hash, &payment_id));
+			assert!(!nodes[2].node.payment_is_probe(&payment_hash, &payment_id));
+		},
+		_ => panic!(),
+	}
+
+	get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[0], 1);
+}
+
+#[test]
+fn successful_probe_yields_event() {
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+	let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
+
+	let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
+
+	// node[0] -- update_add_htlcs -> node[1]
+	check_added_monitors!(nodes[0], 1);
+	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	// node[1] -- update_add_htlcs -> node[2]
+	check_added_monitors!(nodes[1], 1);
+	let updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+	let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
+	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &probe_event.msgs[0]);
+	check_added_monitors!(nodes[2], 0);
+	commitment_signed_dance!(nodes[2], nodes[1], probe_event.commitment_msg, true, true);
+
+	// node[1] <- update_fail_htlcs -- node[2]
+	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, true);
+
+	// node[0] <- update_fail_htlcs -- node[1]
+	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+	check_added_monitors!(nodes[0], 0);
+	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+	let mut events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events.drain(..).next().unwrap() {
+		crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+			assert_eq!(payment_id, ev_pid);
+			assert_eq!(payment_hash, ev_ph);
+		},
+		_ => panic!(),
+	};
+}
+
+#[test]
+fn failed_probe_yields_event() {
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000, InitFeatures::known(), InitFeatures::known());
+
+	let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id());
+
+	let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_999_000, 42);
+
+	let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
+
+	// node[0] -- update_add_htlcs -> node[1]
+	check_added_monitors!(nodes[0], 1);
+	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	// node[0] <- update_fail_htlcs -- node[1]
+	check_added_monitors!(nodes[1], 1);
+	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	// Skip the PendingHTLCsForwardable event
+	let _events = nodes[1].node.get_and_clear_pending_events();
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+	check_added_monitors!(nodes[0], 0);
+	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+	let mut events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events.drain(..).next().unwrap() {
+		crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+			assert_eq!(payment_id, ev_pid);
+			assert_eq!(payment_hash, ev_ph);
+		},
+		_ => panic!(),
+	};
+}