X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=2fbc36ce9a5b365d1b548f2fa5038722ab1db625;hb=d9eb201bd8da97e9f249c793abeb9cbdb00f4744;hp=fbd7ddddf8e8d07923ef0b57a8920dc6a3fb6665;hpb=01847277b957ec94129141a7e7439ae539c094f1;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index fbd7dddd..2fbc36ce 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -17,7 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; -use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource}; +use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel}; @@ -31,7 +31,7 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use crate::util::enforcing_trait_impls::EnforcingSigner; -use crate::util::test_utils; +use crate::util::test_utils::{self, WatchtowerPersister}; use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::string::UntrustedString; @@ -703,7 +703,7 @@ fn test_update_fee_that_funder_cannot_afford() { let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.funding_pubkey) }; @@ -712,9 +712,9 @@ fn test_update_fee_that_funder_cannot_afford() { let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), pubkeys.funding_pubkey) }; @@ -738,7 +738,7 @@ fn test_update_fee_that_funder_cannot_afford() { &mut htlcs, &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); - local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() + local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -1417,23 +1417,23 @@ fn test_fee_spike_violation_fails_htlc() { let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); // Make 
the signer believe we validated another commitment, so we can release the secret - chan_signer.get_enforcement_state().last_holder_commitment -= 1; + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, - chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER), - chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx), - chan_signer.pubkeys().funding_pubkey) + chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx), + chan_signer.as_ref().pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), - chan_signer.pubkeys().funding_pubkey) + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().pubkeys().funding_pubkey) }; // Assemble the set of keys we can use for signatures for our commitment_signed message. @@ -1469,7 +1469,7 @@ fn test_fee_spike_violation_fails_htlc() { &mut vec![(accepted_htlc_info, ())], &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); - local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() + local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -2124,7 +2124,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); + expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg); @@ -2153,7 +2153,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent_without_paths!(nodes[0], payment_preimage_2); + expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -2554,6 +2554,72 @@ fn revoked_output_claim() { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); } +#[test] +fn test_forming_justice_tx_from_monitor_updates() { + 
do_test_forming_justice_tx_from_monitor_updates(true);
+ do_test_forming_justice_tx_from_monitor_updates(false);
+}
+
+fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) {
+ // Simple test to make sure that the justice tx built by the WatchtowerPersister
+ // is properly formed and can be broadcast/confirmed successfully in the event
+ // that a revoked commitment transaction is broadcast
+ // (Similar to the `revoked_output_claim` test, but here we fetch the justice tx and broadcast it manually)
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script().unwrap();
+ let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script().unwrap();
+ let persisters = vec![WatchtowerPersister::new(destination_script0),
+ WatchtowerPersister::new(destination_script1)];
+ let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+
+ if !broadcast_initial_commitment {
+ // Send a payment to move the channel forward
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
+ }
+
+ // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output.
+ // We'll keep this commitment transaction to broadcast once it's revoked.
+ let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id);
+ assert_eq!(revoked_local_txn.len(), 1);
+ let revoked_commitment_tx = &revoked_local_txn[0];
+
+ // Send another payment, now revoking the previous commitment tx
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
+
+ let justice_tx = persisters[1].justice_tx(funding_txo, &revoked_commitment_tx.txid()).unwrap();
+ check_spends!(justice_tx, revoked_commitment_tx);
+
+ mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
+ mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
+
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+ &[nodes[0].node.get_our_node_id()], 100_000);
+ get_announce_close_broadcast_events(&nodes, 1, 0);
+
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+ &[nodes[1].node.get_our_node_id()], 100_000);
+
+ // Check that the justice tx has sent the revoked output value to nodes[1]
+ let monitor = get_monitor!(nodes[1], channel_id);
+ let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| {
+ match balance {
+ channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, ..
} => sum + amount_satoshis, + _ => panic!("Unexpected balance type"), + } + }); + // On the first commitment, node[1]'s balance was below dust so it didn't have an output + let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value }; + let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value; + assert_eq!(total_claimable_balance, expected_claimable_balance); +} + + #[test] fn claim_htlc_outputs_shared_tx() { // Node revoked old state, htlcs haven't time out yet, claim them in shared justice tx @@ -3584,7 +3650,7 @@ fn test_dup_events_on_peer_disconnect() { check_added_monitors!(nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); @@ -3706,6 +3772,7 @@ fn test_simple_peer_disconnect() { _ => panic!("Unexpected event"), } } + check_added_monitors(&nodes[0], 1); claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); @@ -4302,7 +4369,7 @@ macro_rules! check_spendable_outputs { let secp_ctx = Secp256k1::new(); for event in events.drain(..) { match event { - Event::SpendableOutputs { mut outputs } => { + Event::SpendableOutputs { mut outputs, channel_id: _ } => { for outp in outputs.drain(..) { txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap()); all_outputs.push(outp); @@ -4943,7 +5010,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); - expect_payment_sent(&nodes[0], our_payment_preimage, None, true); + expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); } #[test] @@ -5486,7 +5553,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); @@ -6109,7 +6176,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, [nodes[0].node.get_our_node_id()], 100000); } @@ -7013,7 +7080,7 @@ fn test_user_configurable_csv_delay() { 
open_channel.to_self_delay = 200; if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, - &low_our_to_self_config, 0, &nodes[0].logger, 42, /*is_0conf=*/false) + &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, @@ -7045,7 +7112,7 @@ fn test_user_configurable_csv_delay() { open_channel.to_self_delay = 200; if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, - &high_their_to_self_config, 0, &nodes[0].logger, 42, /*is_0conf=*/false) + &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); }, @@ -7592,16 +7659,16 @@ fn test_counterparty_raa_skip_no_crash() { const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; // Make signer believe we got a counterparty signature, so that it allows the revocation - keys.get_enforcement_state().last_holder_commitment -= 1; - per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER); + keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER); // Must revoke without gaps - keys.get_enforcement_state().last_holder_commitment -= 1; - keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1); + keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1); - keys.get_enforcement_state().last_holder_commitment -= 1; + keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), - &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); + &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); } nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), @@ -9359,7 +9426,7 @@ fn test_inconsistent_mpp_params() { pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None); do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage); - expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true); + expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true); } #[test] @@ -9974,7 +10041,15 @@ fn test_remove_expired_outbound_unfunded_channels() { nodes[0].node.timer_tick_occurred(); check_outbound_channel_existence(false); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + let msg_events = 
nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + match msg_events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake"); + }, + _ => panic!("Unexpected event"), + } + check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -10017,5 +10092,100 @@ fn test_remove_expired_inbound_unfunded_channels() { nodes[1].node.timer_tick_occurred(); check_inbound_channel_existence(false); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + match msg_events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake"); + }, + _ => panic!("Unexpected event"), + } + check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000); +} + +fn do_test_multi_post_event_actions(do_reload: bool) { + // Tests handling multiple post-Event actions at once. + // There is specific code in ChannelManager to handle channels where multiple post-Event + // `ChannelMonitorUpdates` are pending at once. This test exercises that code. + // + // Specifically, we test calling `get_and_clear_pending_events` while there are two + // PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s + // - one from an RAA and one from an inbound commitment_signed. 
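+ //
+ // Loosely speaking (the exact bookkeeping lives in `ChannelManager`), those monitor
+ // updates are deliberately held back until the corresponding `PaymentSent` events have
+ // been handed to the user, so a single `get_and_clear_pending_events` call here must
+ // end up releasing several blocked updates at once.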
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let (persister, chain_monitor); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_0_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2; + + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + send_payment(&nodes[0], &[&nodes[2]], 1_000_000); + + let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000); + + nodes[1].node.claim_funds(our_payment_preimage); + check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); + + nodes[2].node.claim_funds(payment_preimage_2); + check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000); + + for dest in &[1, 2] { + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false); + check_added_monitors(&nodes[0], 0); + } + + let (route, payment_hash_3, _, payment_secret_3) = + get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); + let payment_id = PaymentId(payment_hash_3.0); + nodes[1].node.send_payment_with_route(&route, payment_hash_3, + RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap(); + check_added_monitors(&nodes[1], 1); + + let send_event = SendEvent::from_node(&nodes[1]); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + if do_reload { + let nodes_0_serialized = nodes[0].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode(); + reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized); + + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2])); + } + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 4); + if let Event::PaymentSent { payment_preimage, .. } = events[0] { + assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); + } else { panic!(); } + if let Event::PaymentSent { payment_preimage, .. } = events[1] { + assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); + } else { panic!(); } + if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } + if let Event::PaymentPathSuccessful { .. 
} = events[3] {} else { panic!(); } + + // After the events are processed, the ChannelMonitorUpdates will be released and, upon their + // completion, we'll respond to nodes[1] with an RAA + CS. + get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + check_added_monitors(&nodes[0], 3); +} + +#[test] +fn test_multi_post_event_actions() { + do_test_multi_post_event_actions(true); + do_test_multi_post_event_actions(false); }
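
A note on the signer hunks above: the recurring mechanical change is that a channel's
`get_signer()` now returns a wrapper around the signer rather than the signer itself.
Methods on the base `ChannelSigner` trait are reached through `as_ref()`, while
ECDSA-specific signing (and the `EnforcingSigner` test hooks such as
`get_enforcement_state()`) go through `as_ecdsa().unwrap()`. A minimal sketch of the
pattern, reusing the `local_chan`, `commitment_tx`, and `secp_ctx` bindings from the
tests above (the bindings and constants are borrowed from those tests for illustration,
not new API surface):

	let chan_signer = local_chan.get_signer();

	// Base `ChannelSigner` accessors go through `as_ref()`.
	let pubkeys = chan_signer.as_ref().pubkeys();
	let per_commitment_point =
		chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx);

	// ECDSA-only operations require the downcast helper; `sign_counterparty_commitment`
	// returns the commitment signature plus one signature per HTLC.
	let (commitment_sig, htlc_sigs) = chan_signer.as_ecdsa().unwrap()
		.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx)
		.unwrap();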