Add baseline OnionMessenger and msgs::OnionMessage and its serialization
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 07e531c5b4b7e330b252414a89207ea70e698bc2..7f725a42141013b778ece0e0b2e5a478c71a5d3c 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -15,6 +15,7 @@ use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::OutPoint;
 use chain::keysinterface::KeysInterface;
+use ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
@@ -70,7 +71,7 @@ fn retry_single_path_payment() {
        check_added_monitors!(nodes[1], 1);
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
-       expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
        // Rebalance the channel so the retry succeeds.
        send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
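
These hunks (and the two below) migrate call sites from the expect_payment_failed_conditions! macro to a free function of the same name. Reconstructed from the call sites alone, the signature the tests assume looks roughly like the sketch below; the parameter names, lifetimes, and body live in functional_test_utils and are assumptions here, not copied from this patch:

	pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
		node: &'a Node<'b, 'c, 'd>, expected_payment_hash: PaymentHash,
		expected_rejected_by_dest: bool, conditions: PaymentFailedConditions<'e>,
	) {
		// Drains the node's pending events and asserts a payment-path-failed
		// event matching `expected_payment_hash` and the given conditions.
	}
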
@@ -173,7 +174,7 @@ fn mpp_retry() {
        check_added_monitors!(nodes[2], 1);
        nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
-       expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
        // Rebalance the channel so the second half of the payment can succeed.
        send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
@@ -251,7 +252,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
                check_added_monitors!(nodes[1], 1);
                commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);
 
-               expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
+               expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
        } else {
                // Pass half of the payment along the second path.
                pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
@@ -521,7 +522,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
        }
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-       expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
        // Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
        // reloaded) via a route over the new channel, which should work without issue and eventually be
@@ -531,8 +532,19 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
        // and not the original fee. We also update node[1]'s relevant config as
        // do_claim_payment_along_route expects us to never overpay.
-       nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_id_2).unwrap().config.forwarding_fee_base_msat += 100_000;
-       new_route.paths[0][0].fee_msat += 100_000;
+       {
+               let mut channel_state = nodes[1].node.channel_state.lock().unwrap();
+               let mut channel = channel_state.by_id.get_mut(&chan_id_2).unwrap();
+               let mut new_config = channel.config();
+               new_config.forwarding_fee_base_msat += 100_000;
+               channel.update_config(&new_config);
+               new_route.paths[0][0].fee_msat += 100_000;
+       }
+
+       // Force expiration of the channel's previous config.
+       for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
+               nodes[1].node.timer_tick_occurred();
+       }
 
        assert!(nodes[0].node.retry_payment(&new_route, payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
        nodes[0].node.retry_payment(&new_route, payment_id).unwrap();
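
The replacement block above follows a copy-modify-apply pattern: config() returns a copy of the channel's current ChannelConfig, the test bumps the base forwarding fee, and update_config() applies the whole struct back while the channel retains its previous config. Forwards priced under the old fee keep working until EXPIRE_PREV_CONFIG_TICKS timer ticks have elapsed, which is why the test then drives timer_tick_occurred() by hand. A condensed sketch of the same pattern with descriptive comments (node and chan_id stand in for the test handles above):

	{
		// Take the channel-state lock and look up the channel to reconfigure.
		let mut channel_state = node.node.channel_state.lock().unwrap();
		let channel = channel_state.by_id.get_mut(&chan_id).unwrap();
		// Copy the live config, adjust one field, and apply the whole struct.
		let mut new_config = channel.config();
		new_config.forwarding_fee_base_msat += 100_000;
		channel.update_config(&new_config);
	}
	// The prior config stays honored for a grace period; tick past it so
	// only the new fee is accepted on forwarded HTLCs.
	for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
		node.node.timer_tick_occurred();
	}
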
@@ -572,7 +584,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
        // nodes[0].
        let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
-       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
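
This last hunk tracks an API rename: force_close_channel becomes force_close_broadcasting_latest_txn, making explicit that this variant broadcasts the holder's latest commitment transaction on close (the same API revision also gained a force_close_without_broadcasting_txn counterpart). Apart from the name, usage at the call sites is unchanged, as in this sketch against the usual functional-test harness:

	// Force-close our first channel with the peer, broadcasting our latest
	// commitment transaction; `nodes` is the functional-test node array.
	let chan_id = nodes[0].node.list_channels()[0].channel_id;
	let peer_id = nodes[1].node.get_our_node_id();
	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &peer_id).unwrap();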