//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.
-use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
+use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::sign::EntropySource;
-use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose};
use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, commit_tx_fee_msat, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI};
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
use crate::ln::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures};
-use crate::ln::{msgs, ChannelId, PaymentHash, PaymentSecret, PaymentPreimage};
+use crate::ln::msgs;
+use crate::ln::types::{ChannelId, PaymentHash, PaymentSecret, PaymentPreimage};
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::onion_utils;
use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
use crate::ln::functional_test_utils;
use crate::ln::functional_test_utils::*;
use crate::routing::gossip::NodeId;
+
#[cfg(feature = "std")]
use {
crate::util::time::tests::SinceEpoch,
- std::time::{SystemTime, Instant, Duration}
+ std::time::{SystemTime, Instant, Duration},
};
#[test]
// Can't use claim_payment_along_route as it doesn't support overpayment, so we break out the
// individual steps here.
+ nodes[3].node.claim_funds(payment_preimage);
let extra_fees = vec![0, total_overpaid_amount];
- let expected_total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees(
- &nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], &extra_fees[..], false,
- payment_preimage);
+ let expected_route = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+ let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_route[..], payment_preimage)
+ .with_expected_min_htlc_overpay(extra_fees);
+ let expected_total_fee_msat = pass_claimed_payment_along_route(args);
expect_payment_sent!(&nodes[0], payment_preimage, Some(expected_total_fee_msat));
}
do_test_completed_payment_not_retryable_on_reload(false);
}
-
-fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
+fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
- // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
- // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
- // the ChannelMonitor tells it to.
+ // dropped. From there, the ChannelManager relies on the ChannelMonitor having a copy of the
+ // relevant fail-/claim-back data and processes the HTLC fail/claim when the ChannelMonitor tells
+ // it to.
//
- // If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
- // ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
- // duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
+ // If, due to an on-chain event, an HTLC is failed/claimed, we provide the
+ // ChannelManager with the HTLC event immediately, without waiting for ChannelMonitor
+ // persistence to complete. This may generate a duplicate HTLC fail/claim event
+ // (e.g. via a PaymentPathFailed event) on reload.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let persister;
connect_block(&nodes[0], &claim_block);
}
- let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
- let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
- .get_mut(&funding_txo).unwrap().drain().collect();
- // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
- // If we're testing connection idempotency we may get substantially more.
- assert!(mon_updates.len() >= 1);
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
- assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ // Note that we deliberately skip persisting the ChannelMonitors here. The PaymentSent
+ // event should still be generated without ChannelMonitor persistence. If we reset to a
+ // previous state on reload, the block should be replayed and we'll regenerate the event.
// If we persist the ChannelManager here, we should get the PaymentSent event after
// deserialization.
chan_manager_serialized = nodes[0].node.encode();
}
- // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
- // payment sent event.
- chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
- for update in mon_updates {
- nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
- }
if payment_timeout {
expect_payment_failed!(nodes[0], payment_hash, false);
} else {
}
#[test]
-fn test_dup_htlc_onchain_fails_on_reload() {
- do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
- do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
- do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
- do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
- do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
- do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
+fn test_dup_htlc_onchain_doesnt_fail_on_reload() {
+ // Args are (persist_manager_post_event, confirm_commitment_tx, payment_timeout).
+ // Note that the payment_timeout == true cases are only run with
+ // confirm_commitment_tx == true.
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, true, true);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, true, false);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, false, false);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, true, true);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, true, false);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, false, false);
}
#[test]
assert_eq!(skimmed_fee_msat * num_mpp_parts as u64, counterparty_skimmed_fee_msat);
assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
match purpose {
- crate::events::PaymentPurpose::InvoicePayment { payment_preimage: ev_payment_preimage,
- payment_secret: ev_payment_secret, .. } =>
- {
+ crate::events::PaymentPurpose::Bolt11InvoicePayment {
+ payment_preimage: ev_payment_preimage,
+ payment_secret: ev_payment_secret,
+ ..
+ } => {
assert_eq!(payment_preimage, ev_payment_preimage.unwrap());
assert_eq!(payment_secret, *ev_payment_secret);
},
let mut expected_paths = Vec::new();
for _ in 0..num_mpp_parts { expected_paths_vecs.push(vec!(&nodes[1], &nodes[2])); }
for i in 0..num_mpp_parts { expected_paths.push(&expected_paths_vecs[i][..]); }
- let total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees(
- &nodes[0], &expected_paths[..], &vec![skimmed_fee_msat as u32; num_mpp_parts][..], false,
- payment_preimage);
+ expected_paths[0].last().unwrap().node.claim_funds(payment_preimage);
+ let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_paths[..], payment_preimage)
+ .with_expected_extra_fees(vec![skimmed_fee_msat as u32; num_mpp_parts]);
+ let total_fee_msat = pass_claimed_payment_along_route(args);
// The sender doesn't know that the penultimate hop took an extra fee.
expect_payment_sent(&nodes[0], payment_preimage,
Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true, true);
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 0);
} else if test == AutoRetry::FailTimeout {
- #[cfg(not(feature = "no-std"))] {
+ #[cfg(feature = "std")] {
// Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
match (known_tlvs, even_tlvs) {
(true, _) => {
nodes[1].node.claim_funds_with_known_custom_tlvs(our_payment_preimage);
- let expected_total_fee_msat = pass_claimed_payment_along_route(&nodes[0], &[&[&nodes[1]]], &[0; 1], false, our_payment_preimage);
+ let expected_total_fee_msat = pass_claimed_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], our_payment_preimage));
expect_payment_sent!(&nodes[0], our_payment_preimage, Some(expected_total_fee_msat));
},
(false, false) => {
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
- let payment_claimable = pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000,
- payment_hash, Some(payment_secret), events.pop().unwrap(), true, None).unwrap();
- match payment_claimable {
- Event::PaymentClaimable { onion_fields, .. } => {
- assert_eq!(&onion_fields.unwrap().custom_tlvs()[..], &custom_tlvs[..]);
- },
- _ => panic!("Unexpected event"),
- };
+ let path = &[&nodes[1], &nodes[2]];
+ let args = PassAlongPathArgs::new(&nodes[0], path, 1_000_000, payment_hash, events.pop().unwrap())
+ .with_payment_secret(payment_secret)
+ .with_custom_tlvs(custom_tlvs);
+ do_pass_along_path(args);
claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
}