//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.
-use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
+use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::sign::EntropySource;
-use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose};
use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, commit_tx_fee_msat, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI};
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
- do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
+ do_claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage)
+ );
expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0].hops[0].fee_msat));
}
do_test_completed_payment_not_retryable_on_reload(false);
}
-
-fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
+fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
- // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
- // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
- // the ChannelMonitor tells it to.
+ // dropped. From there, the ChannelManager relies on the ChannelMonitor having a copy of the
+ // relevant fail-/claim-back data and processes the HTLC fail/claim when the ChannelMonitor tells
+ // it to.
//
- // If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
- // ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
- // duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
+ // If, due to an on-chain event, an HTLC is failed/claimed, we provide the
+ // ChannelManager with the HTLC event without waiting for ChannelMonitor persistence.
+ // This might generate duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event) on reload.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let persister;
// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
// returning InProgress. Note that under the new model this no longer blocks event generation —
// the claim event should reach the ChannelManager even though the monitor update has not yet
// been persisted (see the note below about skipping ChannelMonitor persistence).
- chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
if payment_timeout {
connect_block(&nodes[0], &claim_block);
}
- let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
- let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
- .get_mut(&funding_txo).unwrap().drain().collect();
- // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
- // If we're testing connection idempotency we may get substantially more.
- assert!(mon_updates.len() >= 1);
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
- assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ // Note that we skip persisting ChannelMonitors. We should still be generating the payment sent
+ // event without ChannelMonitor persistence. If we reset to a previous state on reload, the block
+ // should be replayed and we'll regenerate the event.
// If we persist the ChannelManager here, we should get the PaymentSent event after
// deserialization.
chan_manager_serialized = nodes[0].node.encode();
}
- // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
- // payment sent event.
- chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
- for update in mon_updates {
- nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
- }
if payment_timeout {
expect_payment_failed!(nodes[0], payment_hash, false);
} else {
}
#[test]
-fn test_dup_htlc_onchain_fails_on_reload() {
- do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
- do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
- do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
- do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
- do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
- do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
+fn test_dup_htlc_onchain_doesnt_fail_on_reload() {
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, true, true);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, true, false);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(true, false, false);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, true, true);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, true, false);
+ do_test_dup_htlc_onchain_doesnt_fail_on_reload(false, false, false);
}
#[test]
// Claim the payment backwards, but note that the PaymentSent event is still pending and has
// not been seen by the user. At this point, from the user perspective nothing has changed, so
// we must remain just as idempotent as we were before.
- do_claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, first_payment_preimage);
+ do_claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], first_payment_preimage)
+ );
for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
nodes[0].node.timer_tick_occurred();
let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id());
- do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[1], &nodes[2])[..]), false, payment_preimage);
+ do_claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage)
+ );
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
_ => panic!("Unexpected event"),
}
- do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]],
- false, our_payment_preimage);
+ do_claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage)
+ );
expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true, true);
} else {
// Expect fail back
let payment_hash = PaymentHash(Sha256::hash(&keysend_preimage.0).to_byte_array());
let (onion_routing_packet, first_hop_msat, cltv_expiry) = onion_utils::create_payment_onion(
- &secp_ctx, &route.paths[0], &session_priv, amt_msat, recipient_onion.clone(),
+ &secp_ctx, &route.paths[0], &session_priv, amt_msat, &recipient_onion,
nodes[0].best_block_info().1, &payment_hash, &Some(keysend_preimage), prng_seed
).unwrap();