X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=f5ba7166ca71b80c0732c0f758001b82f9aa371c;hb=8866ed35330bae1af2237c1951d9c4025938aa65;hp=546c23247aaa45e18bc1c269f179004ebb432bdf;hpb=7a63ab77da7e0be35b1c5d87643e5461cd593b94;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 546c2324..f5ba7166 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -17,7 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; -use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource}; +use crate::sign::{EcdsaChannelSigner, EntropySource}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel}; @@ -703,7 +703,7 @@ fn test_update_fee_that_funder_cannot_afford() { let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.funding_pubkey) }; @@ -712,9 +712,9 @@ fn test_update_fee_that_funder_cannot_afford() { let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), pubkeys.funding_pubkey) }; @@ -738,7 +738,7 @@ fn test_update_fee_that_funder_cannot_afford() { &mut htlcs, &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); - local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() + local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -763,7 +763,8 @@ fn test_update_fee_that_funder_cannot_afford() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, + [nodes[0].node.get_our_node_id()], channel_value); } #[test] @@ -862,8 +863,8 @@ fn 
test_update_fee_with_fundee_update_add_htlc() { send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -976,8 +977,8 @@ fn test_update_fee() { assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -1085,17 +1086,17 @@ fn fake_network_test() { // Close down the channels... close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); - check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -1287,7 +1288,7 @@ fn test_duplicate_htlc_different_direction_onchain() { mine_transaction(&nodes[0], &remote_txn[0]); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let claim_txn = 
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -1416,23 +1417,23 @@ fn test_fee_spike_violation_fails_htlc() { let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); // Make the signer believe we validated another commitment, so we can release the secret - chan_signer.get_enforcement_state().last_holder_commitment -= 1; + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, - chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER), - chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx), - chan_signer.pubkeys().funding_pubkey) + chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx), + chan_signer.as_ref().pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); - let pubkeys = chan_signer.pubkeys(); + let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), - chan_signer.pubkeys().funding_pubkey) + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().pubkeys().funding_pubkey) }; // Assemble the set of keys we can use for signatures for our commitment_signed message. 
@@ -1468,7 +1469,7 @@ fn test_fee_spike_violation_fails_htlc() { &mut vec![(accepted_htlc_info, ())], &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable() ); - local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() + local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -1595,7 +1596,8 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }, + [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -1772,7 +1774,8 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }, + [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -2121,7 +2124,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); + expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg); @@ -2150,7 +2153,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent_without_paths!(nodes[0], payment_preimage_2); + expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -2253,8 +2256,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[0], true); assert_eq!(nodes[0].node.list_channels().len(), 0); assert_eq!(nodes[1].node.list_channels().len(), 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, 
ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); // One pending HTLC is discarded by the force-close: let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000); @@ -2275,8 +2278,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[2], true); assert_eq!(nodes[1].node.list_channels().len(), 0); assert_eq!(nodes[2].node.list_channels().len(), 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); macro_rules! claim_funds { ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => { @@ -2320,8 +2323,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[3], true); assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed); - check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and // confusing us in the following tests. @@ -2394,8 +2397,8 @@ fn channel_monitor_network_test() { assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon), ChannelMonitorUpdateStatus::Completed); - check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed); - check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000); + check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000); } #[test] @@ -2443,7 +2446,7 @@ fn test_justice_tx_htlc_timeout() { node_txn.swap_remove(0); } check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -2451,7 +2454,7 @@ fn test_justice_tx_htlc_timeout() { // Verify broadcast of revoked HTLC-timeout let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); // Broadcast revoked HTLC-timeout on node 1 mine_transaction(&nodes[1], &node_txn[1]); test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone()); @@ -2506,11 +2509,11 @@ fn test_justice_tx_htlc_success() { test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE); mine_transaction(&nodes[1], &revoked_local_txn[0]); - 
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); check_added_monitors!(nodes[1], 1); mine_transaction(&nodes[0], &node_txn[1]); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone()); } get_announce_close_broadcast_events(&nodes, 0, 1); @@ -2538,7 +2541,7 @@ fn revoked_output_claim() { // Inform nodes[1] that nodes[0] broadcast a stale tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output @@ -2548,7 +2551,7 @@ fn revoked_output_claim() { mine_transaction(&nodes[0], &revoked_local_txn[0]); get_announce_close_broadcast_events(&nodes, 0, 1); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -2585,10 +2588,10 @@ fn claim_htlc_outputs_shared_tx() { { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -2647,7 +2650,7 @@ fn claim_htlc_outputs_single_tx() { check_added_monitors!(nodes[0], 1); confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let mut events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); match events.last().unwrap() { @@ -2759,7 +2762,7 @@ fn test_htlc_on_chain_success() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx) assert_eq!(node_txn.len(), 2); check_spends!(node_txn[0], commitment_tx[0]); @@ -2876,7 +2879,7 @@ 
fn test_htlc_on_chain_success() { mine_transaction(&nodes[1], &node_a_commitment_tx[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn let commitment_spend = @@ -2984,14 +2987,15 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 0); // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false); + check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false + , [nodes[2].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1); let timeout_tx = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); @@ -3035,7 +3039,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], commitment_tx[0]); @@ -3072,7 +3076,7 @@ fn test_simple_commitment_revoked_fail_backward() { let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); @@ -3465,7 +3469,7 @@ fn test_htlc_ignore_latest_remote_commitment() { connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 3); @@ -3475,7 +3479,7 @@ fn test_htlc_ignore_latest_remote_commitment() { connect_block(&nodes[1], &block); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 
1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions @@ -3527,7 +3531,7 @@ fn test_force_close_fail_back() { nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); let tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't @@ -3542,7 +3546,7 @@ fn test_force_close_fail_back() { // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. { @@ -3580,7 +3584,7 @@ fn test_dup_events_on_peer_disconnect() { check_added_monitors!(nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); @@ -3628,8 +3632,10 @@ fn test_peer_disconnected_before_funding_broadcasted() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false); - check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false); + check_closed_event!(&nodes[0], 1, ClosureReason::DisconnectedPeer, false + , [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false + , [nodes[0].node.get_our_node_id()], 1000000); } #[test] @@ -3700,6 +3706,7 @@ fn test_simple_peer_disconnect() { _ => panic!("Unexpected event"), } } + check_added_monitors(&nodes[0], 1); claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); @@ -4296,7 +4303,7 @@ macro_rules! check_spendable_outputs { let secp_ctx = Secp256k1::new(); for event in events.drain(..) { match event { - Event::SpendableOutputs { mut outputs } => { + Event::SpendableOutputs { mut outputs, channel_id: _ } => { for outp in outputs.drain(..) 
{ txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap()); all_outputs.push(outp); @@ -4327,7 +4334,7 @@ fn test_claim_sizeable_push_msat() { nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); @@ -4356,7 +4363,7 @@ fn test_claim_on_remote_sizeable_push_msat() { nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -4366,7 +4373,7 @@ fn test_claim_on_remote_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4394,7 +4401,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); mine_transaction(&nodes[1], &node_txn[0]); @@ -4446,7 +4453,7 @@ fn test_static_spendable_outputs_preimage_tx() { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4490,7 +4497,7 @@ fn test_static_spendable_outputs_timeout_tx() { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], our_payment_hash, false); @@ -4521,7 +4528,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); 
check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); @@ -4558,7 +4565,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4572,7 +4579,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs @@ -4626,7 +4633,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(revoked_htlc_txn.len(), 1); @@ -4642,7 +4649,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success @@ -4722,7 +4729,7 @@ fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 1); @@ -4781,7 +4788,7 @@ fn test_onchain_to_onchain_claim() { // Broadcast A's commitment tx on B's chain to see if we are able to claim 
inbound HTLC with our HTLC-Success tx let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: HTLC-Success tx assert_eq!(b_txn.len(), 1); @@ -4838,7 +4845,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &commitment_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires let htlc_timeout_tx; @@ -4885,7 +4892,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[2], &commitment_txn[0]); check_added_monitors!(nodes[2], 2); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); let events = nodes[2].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. } => {}, @@ -4937,7 +4944,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); - expect_payment_sent(&nodes[0], our_payment_preimage, None, true); + expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); } #[test] @@ -4963,7 +4970,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { mine_transaction(&nodes[1], &local_txn[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. 
} => {}, @@ -5318,7 +5325,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { mine_transaction(&nodes[0], &local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let htlc_timeout = { @@ -5405,7 +5412,7 @@ fn test_key_derivation_params() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -5447,7 +5454,7 @@ fn test_static_output_closing_tx() { let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; mine_transaction(&nodes[0], &closing_tx); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -5455,7 +5462,7 @@ fn test_static_output_closing_tx() { check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -5480,7 +5487,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); @@ -5497,7 +5504,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { @@ -5528,7 +5535,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ 
-5574,7 +5581,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); } else { expect_payment_failed!(nodes[0], our_payment_hash, true); } @@ -6103,7 +6110,8 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, + [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -6224,7 +6232,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -6260,7 +6268,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -6305,7 +6313,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -6329,7 +6337,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000); } #[test] @@ -6353,7 +6361,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds 
instead of block height"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -6405,7 +6413,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -6437,7 +6445,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -6469,7 +6477,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -6501,7 +6509,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -6544,7 +6552,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -6587,7 +6595,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, 
[nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -6634,7 +6642,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000); } #[test] @@ -6861,7 +6869,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); @@ -6924,7 +6932,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { if local { // We fail dust-HTLC 1 by broadcast of local commitment tx mine_transaction(&nodes[0], &as_commitment_tx[0]); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], dust_hash, false); @@ -6944,7 +6952,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { mine_transaction(&nodes[0], &bs_commitment_tx[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires @@ -7006,7 +7014,7 @@ fn test_user_configurable_csv_delay() { open_channel.to_self_delay = 200; if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, - &low_our_to_self_config, 0, &nodes[0].logger, 42) + &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, @@ -7030,7 +7038,7 @@ fn test_user_configurable_csv_delay() { _ => { panic!(); } } } else { panic!(); } - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000); // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); @@ -7038,7 +7046,7 @@ fn test_user_configurable_csv_delay() { open_channel.to_self_delay = 200; if let Err(error) = 
InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, - &high_their_to_self_config, 0, &nodes[0].logger, 42) + &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) { match error { ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); }, @@ -7339,7 +7347,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()])); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) let revoked_htlc_txn = { @@ -7585,16 +7593,16 @@ fn test_counterparty_raa_skip_no_crash() { const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; // Make signer believe we got a counterparty signature, so that it allows the revocation - keys.get_enforcement_state().last_holder_commitment -= 1; - per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER); + keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER); // Must revoke without gaps - keys.get_enforcement_state().last_holder_commitment -= 1; - keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1); + keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1); - keys.get_enforcement_state().last_holder_commitment -= 1; + keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), - &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); + &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); } nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), @@ -7607,7 +7615,8 @@ fn test_counterparty_raa_skip_no_crash() { }); assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() } + , [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -7640,7 +7649,7 @@ fn test_bump_txn_sanitize_tracking_maps() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); let penalty_txn = { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); 
assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3 @@ -7684,7 +7693,7 @@ fn test_channel_conf_timeout() { connect_blocks(&nodes[1], 1); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut); + check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000); let close_ev = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(close_ev.len(), 1); match close_ev[0] { @@ -7870,70 +7879,9 @@ fn test_manually_reject_inbound_channel_request() { } _ => panic!("Unexpected event"), } - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); -} - -#[test] -fn test_reject_funding_before_inbound_channel_accepted() { - // This tests that when `UserConfig::manually_accept_inbound_channels` is set to true, inbound - // channels must to be manually accepted through `ChannelManager::accept_inbound_channel` by - // the node operator before the counterparty sends a `FundingCreated` message. If a - // `FundingCreated` message is received before the channel is accepted, it should be rejected - // and the channel should be closed. - let mut manually_accept_conf = UserConfig::default(); - manually_accept_conf.manually_accept_inbound_channels = true; - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap(); - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - let temp_channel_id = res.temporary_channel_id; - - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res); - - // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`. - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - // Clear the `Event::OpenChannelRequest` event without responding to the request. - nodes[1].node.get_and_clear_pending_events(); - - // Get the `AcceptChannel` message of `nodes[1]` without calling - // `ChannelManager::accept_inbound_channel`, which generates a - // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]` - // `handle_accept_channel`, which is required in order for `create_funding_transaction` to - // succeed when `nodes[0]` is passed to it. 
- let accept_chan_msg = { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - let channel = get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id); - channel.get_accept_channel_message() - }; - nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg); - - let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); - - nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - - // The `funding_created_msg` should be rejected by `nodes[1]` as it hasn't accepted the channel - nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); - - let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(close_msg_ev.len(), 1); - - let expected_err = "FundingCreated message received before the channel was accepted"; - match close_msg_ev[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => { - assert_eq!(msg.channel_id, temp_channel_id); - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - assert_eq!(msg.data, expected_err); - } - _ => panic!("Unexpected event"), - } - - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() }); + // There should be no more events to process, as the channel was never opened. + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } #[test] @@ -7961,10 +7909,10 @@ fn test_can_not_accept_inbound_channel_twice() { let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0); match api_res { Err(APIError::APIMisuseError { err }) => { - assert_eq!(err, "The channel isn't currently awaiting to be accepted."); + assert_eq!(err, "No such channel awaiting to be accepted."); }, Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"), - Err(_) => panic!("Unexpected Error"), + Err(e) => panic!("Unexpected Error {:?}", e), } } _ => panic!("Unexpected event"), @@ -7992,11 +7940,11 @@ fn test_can_not_accept_unknown_inbound_channel() { let unknown_channel_id = [0; 32]; let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0); match api_res { - Err(APIError::ChannelUnavailable { err }) => { - assert_eq!(err, format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(unknown_channel_id), nodes[1].node.get_our_node_id())); + Err(APIError::APIMisuseError { err }) => { + assert_eq!(err, "No such channel awaiting to be accepted."); }, Ok(_) => panic!("It shouldn't be possible to accept an unkown channel"), - Err(_) => panic!("Unexpected Error"), + Err(e) => panic!("Unexpected Error: {:?}", e), } } @@ -8486,7 +8434,8 @@ fn test_concurrent_monitor_claim() { let height = HTLC_TIMEOUT_BROADCAST + 1; connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); check_closed_broadcast(&nodes[0], 1, true); - check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false); + check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, + [nodes[1].node.get_our_node_id()], 100000); watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), 
 	check_added_monitors(&nodes[0], 1);
 	{
@@ -8534,7 +8483,8 @@ fn test_pre_lockin_no_chan_closed_update() {
 	let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
 	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
 	assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
-	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
+	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
+		[nodes[1].node.get_our_node_id(); 2], 100000);
 }
 
 #[test]
@@ -8569,7 +8519,7 @@ fn test_htlc_no_detection() {
 	chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV);
 
 	let htlc_timeout = {
@@ -8635,7 +8585,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[force_closing_node], true);
 	check_added_monitors!(nodes[force_closing_node], 1);
-	check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
+	check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
 	if go_onchain_before_fulfill {
 		let txn_to_broadcast = match broadcast_alice {
 			true => alice_txn.clone(),
@@ -8645,7 +8595,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 		if broadcast_alice {
 			check_closed_broadcast!(nodes[1], true);
 			check_added_monitors!(nodes[1], 1);
-			check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+			check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 		}
 	}
 
@@ -8725,7 +8675,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 		if broadcast_alice {
 			check_closed_broadcast!(nodes[1], true);
 			check_added_monitors!(nodes[1], 1);
-			check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+			check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 		}
 		let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		if broadcast_alice {
@@ -8992,7 +8942,8 @@ fn test_error_chans_closed() {
 	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
 	check_added_monitors!(nodes[0], 1);
 	check_closed_broadcast!(nodes[0], false);
-	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+	check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+		[nodes[1].node.get_our_node_id()], 100000);
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 	assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
 	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -9002,7 +8953,8 @@ fn test_error_chans_closed() {
 	let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
 	check_added_monitors!(nodes[0], 2);
-	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+		[nodes[1].node.get_our_node_id(); 2], 100000);
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
 	match events[0] {
@@ -9079,7 +9031,8 @@ fn test_invalid_funding_tx() {
 	let expected_err = "funding tx had wrong script/value or output index";
 	confirm_transaction_at(&nodes[1], &tx, 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
+		[nodes[0].node.get_our_node_id()], 100000);
 	check_added_monitors!(nodes[1], 1);
 	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_2.len(), 1);
@@ -9145,7 +9098,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
 	check_closed_broadcast!(nodes[1], true);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
 	check_added_monitors!(nodes[1], 1);
 	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(node_txn.len(), 1);
@@ -9407,7 +9360,7 @@ fn test_inconsistent_mpp_params() {
 	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
 
 	do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
-	expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true);
+	expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
 }
 
 #[test]
@@ -9786,7 +9739,8 @@ fn accept_busted_but_better_fee() {
 		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
 			nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
 			check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
-				err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
+				err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() },
+				[nodes[0].node.get_our_node_id()], 100000);
 			check_closed_broadcast!(nodes[1], true);
 			check_added_monitors!(nodes[1], 1);
 		},
@@ -10021,7 +9975,15 @@ fn test_remove_expired_outbound_unfunded_channels() {
 	nodes[0].node.timer_tick_occurred();
 	check_outbound_channel_existence(false);
 
-	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match msg_events[0] {
+		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+			assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
+		},
+		_ => panic!("Unexpected event"),
+	}
+	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -10064,5 +10026,100 @@ fn test_remove_expired_inbound_unfunded_channels() {
 	nodes[1].node.timer_tick_occurred();
 	check_inbound_channel_existence(false);
 
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match msg_events[0] {
+		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+			assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
+		},
+		_ => panic!("Unexpected event"),
+	}
+	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+}
+
+fn do_test_multi_post_event_actions(do_reload: bool) {
+	// Tests handling multiple post-Event actions at once.
+	// There is specific code in ChannelManager to handle channels where multiple post-Event
+	// `ChannelMonitorUpdates` are pending at once. This test exercises that code.
+	//
+	// Specifically, we test calling `get_and_clear_pending_events` while there are two
+	// PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s
+	// - one from an RAA and one from an inbound commitment_signed.
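+	//
+	// When `do_reload` is set, nodes[0] is additionally serialized and reloaded (and its peers
+	// reconnected) before the events are drained, so the same four events must be regenerated
+	// from the persisted ChannelManager and ChannelMonitors.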
+
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let (persister, chain_monitor);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes_0_deserialized;
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
+
+	send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+	send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+
+	let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+	let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+
+	nodes[1].node.claim_funds(our_payment_preimage);
+	check_added_monitors!(nodes[1], 1);
+	expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+	nodes[2].node.claim_funds(payment_preimage_2);
+	check_added_monitors!(nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
+
+	for dest in &[1, 2] {
+		let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
+		check_added_monitors(&nodes[0], 0);
+	}
+
+	let (route, payment_hash_3, _, payment_secret_3) =
+		get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
+	let payment_id = PaymentId(payment_hash_3.0);
+	nodes[1].node.send_payment_with_route(&route, payment_hash_3,
+		RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
+	check_added_monitors(&nodes[1], 1);
+
+	let send_event = SendEvent::from_node(&nodes[1]);
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	if do_reload {
+		let nodes_0_serialized = nodes[0].node.encode();
+		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+		let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
+		reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
+
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+		nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
+	}
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 4);
+	if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
+	} else { panic!(); }
+	if let Event::PaymentSent { payment_preimage, .. } = events[1] {
+		assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
+	} else { panic!(); }
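+	// The two `PaymentSent`s above may surface in either order, as may the
+	// `PaymentPathSuccessful`s checked below.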
+	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
+	if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
+
+	// After the events are processed, the ChannelMonitorUpdates will be released and, upon their
+	// completion, we'll respond to nodes[1] with an RAA + CS.
+	get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+	check_added_monitors(&nodes[0], 3);
+}
+
+#[test]
+fn test_multi_post_event_actions() {
+	do_test_multi_post_event_actions(true);
+	do_test_multi_post_event_actions(false);
+}