X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=bb9556fc46a1003e334a990ad66efbcb7c1a4c4b;hb=843d25d750c3408d3f8f917764b8a58019a9dd81;hp=e6529415f2a5ed2226afadf87caa6ca76543738c;hpb=2da0d6c0c9afb5753df9573ae7748a9087e493bf;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e6529415..bb9556fc 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -19,7 +19,7 @@ use chain::transaction::OutPoint; use chain::keysinterface::BaseSign; use ln::{PaymentPreimage, PaymentSecret, PaymentHash}; use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC}; -use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA}; +use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA}; use ln::channel::{Channel, ChannelError}; use ln::{chan_utils, onion_utils}; use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT; @@ -30,7 +30,7 @@ use ln::msgs; use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use util::enforcing_trait_impls::EnforcingSigner; use util::{byte_utils, test_utils}; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason}; use util::errors::APIError; use util::ser::{Writeable, ReadableArgs}; use util::config::UserConfig; @@ -638,6 +638,7 @@ fn test_update_fee_that_funder_cannot_afford() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }); } #[test] @@ -738,6 +739,8 @@ fn test_update_fee_with_fundee_update_add_htlc() { send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); } #[test] @@ -850,6 +853,8 @@ fn test_update_fee() { assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30); assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); } #[test] @@ -977,10 +982,20 @@ fn fake_network_test() { // Close down the channels... 
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); + check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); } #[test] @@ -1176,6 +1191,7 @@ fn test_duplicate_htlc_different_direction_onchain() { mine_transaction(&nodes[0], &remote_txn[0]); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires // Check we only broadcast 1 timeout tx @@ -1457,6 +1473,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }); } #[test] @@ -1583,6 +1600,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }); } #[test] @@ -2039,6 +2057,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[0], true); assert_eq!(nodes[0].node.list_channels().len(), 0); assert_eq!(nodes[1].node.list_channels().len(), 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer); // One pending HTLC is discarded by the force-close: let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0; @@ -2059,6 +2079,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[2], true); assert_eq!(nodes[1].node.list_channels().len(), 0); assert_eq!(nodes[2].node.list_channels().len(), 1); + check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); macro_rules! 
claim_funds { ($node: expr, $prev_node: expr, $preimage: expr) => { @@ -2101,6 +2123,8 @@ fn channel_monitor_network_test() { check_closed_broadcast!(nodes[3], true); assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); + check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed); // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and // confusing us in the following tests. @@ -2172,6 +2196,8 @@ fn channel_monitor_network_test() { assert_eq!(nodes[4].node.list_channels().len(), 0); nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed); + check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed); } #[test] @@ -2221,6 +2247,7 @@ fn test_justice_tx() { node_txn.truncate(1); } check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -2228,6 +2255,7 @@ fn test_justice_tx() { // Verify broadcast of revoked HTLC-timeout let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); // Broadcast revoked HTLC-timeout on node 1 mine_transaction(&nodes[1], &node_txn[1]); test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone()); @@ -2269,9 +2297,11 @@ fn test_justice_tx() { test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE); mine_transaction(&nodes[1], &revoked_local_txn[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); check_added_monitors!(nodes[1], 1); mine_transaction(&nodes[0], &node_txn[1]); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone()); } get_announce_close_broadcast_events(&nodes, 0, 1); @@ -2299,6 +2329,7 @@ fn revoked_output_claim() { // Inform nodes[1] that nodes[0] broadcast a stale tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx @@ -2308,7 +2339,8 @@ fn revoked_output_claim() { // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan mine_transaction(&nodes[0], &revoked_local_txn[0]); get_announce_close_broadcast_events(&nodes, 0, 1); - check_added_monitors!(nodes[0], 1) + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } #[test] @@ -2345,8 +2377,10 @@ fn claim_htlc_outputs_shared_tx() { { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); mine_transaction(&nodes[1], &revoked_local_txn[0]); 
check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], payment_hash_2, true); @@ -2403,7 +2437,13 @@ fn claim_htlc_outputs_single_tx() { check_added_monitors!(nodes[0], 1); confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_ignore!(nodes[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); + let mut events = nodes[0].node.get_and_clear_pending_events(); + expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); + match events[1] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + _ => panic!("Unexpected event"), + } connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], payment_hash_2, true); @@ -2481,8 +2521,8 @@ fn test_htlc_on_chain_success() { send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - let (our_payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); - let (our_payment_preimage_2, _payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); // Broadcast legit commitment tx from C on B's chain // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain @@ -2501,6 +2541,7 @@ fn test_htlc_on_chain_success() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx) assert_eq!(node_txn.len(), 5); assert_eq!(node_txn[0], node_txn[3]); @@ -2526,11 +2567,15 @@ fn test_htlc_on_chain_success() { added_monitors.clear(); } let forwarded_events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(forwarded_events.len(), 2); - if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] { - } else { panic!(); } + assert_eq!(forwarded_events.len(), 3); + match forwarded_events[0] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} + _ => panic!("Unexpected event"), + } if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] { } else { panic!(); } + if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[2] { + } else { panic!(); } let events = nodes[1].node.get_and_clear_pending_msg_events(); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); @@ -2597,6 +2642,7 @@ fn test_htlc_on_chain_success() { mine_transaction(&nodes[1], &node_a_commitment_tx[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn) let commitment_spend = @@ -2632,18 +2678,20 @@ fn test_htlc_on_chain_success() { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 3); let mut first_claimed = false; for event in events { match event { - Event::PaymentSent { payment_preimage } => { - if payment_preimage == our_payment_preimage { + Event::PaymentSent { payment_preimage, payment_hash } => { + if payment_preimage == our_payment_preimage && payment_hash == payment_hash_1 { assert!(!first_claimed); first_claimed = true; } else { assert_eq!(payment_preimage, our_payment_preimage_2); + assert_eq!(payment_hash, payment_hash_2); } }, + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {}, _ => panic!("Unexpected event"), } } @@ -2700,6 +2748,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx) assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan_2.3); @@ -2709,6 +2758,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1); mine_transaction(&nodes[1], &commitment_tx[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let timeout_tx; { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -2776,6 +2826,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx assert_eq!(node_txn.len(), 2); check_spends!(node_txn[0], chan_1.3); @@ -2814,6 +2865,7 @@ fn test_simple_commitment_revoked_fail_backward() { let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); mine_transaction(&nodes[1], &revoked_local_txn[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); @@ -2963,15 +3015,19 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 }); + assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 }); match events[0] { - Event::PaymentFailed { ref payment_hash, .. } => { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentPathFailed { ref payment_hash, .. } => { assert_eq!(*payment_hash, fourth_payment_hash); }, _ => panic!("Unexpected event"), } if !deliver_bs_raa { - match events[1] { + match events[2] { Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), }; @@ -3021,7 +3077,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 3); match events[0] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { + Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { assert!(failed_htlcs.insert(payment_hash.0)); // If we delivered B's RAA we got an unknown preimage error, not something // that we should update our routing table for. if !deliver_bs_raa { assert!(network_update.is_some()); } }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. 
} => { + Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { assert!(failed_htlcs.insert(payment_hash.0)); assert!(network_update.is_some()); }, _ => panic!("Unexpected event"), } match events[2] { - Event::PaymentFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { + Event::PaymentPathFailed { ref payment_hash, rejected_by_dest: _, ref network_update, .. } => { assert!(failed_htlcs.insert(payment_hash.0)); assert!(network_update.is_some()); }, @@ -3131,9 +3187,21 @@ fn fail_backward_pending_htlc_upon_channel_failure() { }; nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc); } - + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); // Check that Alice fails backward the pending HTLC from the second payment. - expect_payment_failed!(nodes[0], failed_payment_hash, true); + match events[0] { + Event::PaymentPathFailed { payment_hash, .. } => { + assert_eq!(payment_hash, failed_payment_hash); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::ChannelClosed { reason: ClosureReason::ProcessingError { ref err }, .. } => { + assert_eq!(err, "Remote side tried to send a 0-msat HTLC"); + }, + _ => panic!("Unexpected event {:?}", events[1]), + } check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); } @@ -3153,6 +3221,7 @@ fn test_htlc_ignore_latest_remote_commitment() { connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 3); @@ -3162,6 +3231,7 @@ fn test_htlc_ignore_latest_remote_commitment() { connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions @@ -3216,6 +3286,7 @@ fn test_force_close_fail_back() { nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed); let tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't @@ -3230,6 +3301,7 @@ fn test_force_close_fail_back() { // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
{ @@ -3299,7 +3371,7 @@ fn test_simple_peer_disconnect() { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; + let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000); let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; @@ -3308,20 +3380,21 @@ fn test_simple_peer_disconnect() { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3); - fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5); + fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5); reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentSent { payment_preimage } => { + Event::PaymentSent { payment_preimage, payment_hash } => { assert_eq!(payment_preimage, payment_preimage_3); + assert_eq!(payment_hash, payment_hash_3); }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => { + Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } => { assert_eq!(payment_hash, payment_hash_5); assert!(rejected_by_dest); }, @@ -3483,8 +3556,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 1); match events_4[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash } => { assert_eq!(payment_preimage_1, *payment_preimage); + assert_eq!(payment_hash_1, *payment_hash); }, _ => panic!("Unexpected event"), } @@ -3523,8 +3597,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 1); match events_4[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash } => { assert_eq!(payment_preimage_1, *payment_preimage); + assert_eq!(payment_hash_1, *payment_hash); }, _ => panic!("Unexpected event"), } @@ -3729,7 +3804,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let logger = test_utils::TestLogger::new(); - let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); // Now try to send a second payment which will fail to send let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); @@ -3763,8 +3838,9 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentSent { ref payment_preimage } => { + 
Event::PaymentSent { ref payment_preimage, ref payment_hash } => { assert_eq!(*payment_preimage, payment_preimage_1); + assert_eq!(*payment_hash, payment_hash_1); }, _ => panic!("Unexpected event"), } @@ -3886,7 +3962,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { // Use the utility function send_payment_along_path to send the payment with MPP data which // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. - nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, &None).unwrap(); + let payment_id = PaymentId([42; 32]); + nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, payment_id, &None).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4083,6 +4160,34 @@ fn test_no_txn_manager_serialize_deserialize() { send_payment(&nodes[0], &[&nodes[1]], 1000000); } +#[test] +fn mpp_failure() { + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let logger = test_utils::TestLogger::new(); + + let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let path = route.paths[0].clone(); + route.paths.push(path); + route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0][0].short_channel_id = chan_1_id; + route.paths[0][1].short_channel_id = chan_3_id; + route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1][0].short_channel_id = chan_2_id; + route.paths[1][1].short_channel_id = chan_4_id; + send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); + fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash); +} + #[test] fn test_dup_htlc_onchain_fails_on_reload() { // When a Channel is closed, any outbound HTLCs which were relayed through it are simply @@ -4092,7 +4197,7 @@ fn test_dup_htlc_onchain_fails_on_reload() { // // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a - // PaymentFailed event appearing). 
However, because we may not serialize the relevant + // PaymentPathFailed event appearing). However, because we may not serialize the relevant // ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs // and de-duplicates ChannelMonitor events. @@ -4114,6 +4219,7 @@ fn test_dup_htlc_onchain_fails_on_reload() { nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); @@ -4131,6 +4237,7 @@ fn test_dup_htlc_onchain_fails_on_reload() { connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]}); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); header.prev_blockhash = nodes[0].best_block_hash(); @@ -4475,6 +4582,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { check_added_monitors!(nodes[0], 1); } nodes[0].node = &nodes_0_deserialized; + check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager); // nodes[1] and nodes[2] have no lost state with nodes[0]... reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); @@ -4538,6 +4646,7 @@ fn test_claim_sizeable_push_msat() { nodes[1].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); @@ -4566,6 +4675,7 @@ fn test_claim_on_remote_sizeable_push_msat() { nodes[0].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -4575,6 +4685,7 @@ fn test_claim_on_remote_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4602,6 +4713,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); mine_transaction(&nodes[1], &node_txn[0]); @@ -4654,6 +4766,7 @@ fn test_static_spendable_outputs_preimage_tx() { check_spends!(node_txn[2], node_txn[1]); mine_transaction(&nodes[1], &node_txn[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = 
check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4698,6 +4811,7 @@ fn test_static_spendable_outputs_timeout_tx() { assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[1]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], our_payment_hash, true); @@ -4728,6 +4842,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 2); @@ -4764,6 +4879,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -4779,6 +4895,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx @@ -4835,6 +4952,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(revoked_htlc_txn.len(), 2); @@ -4851,6 +4969,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx @@ -4931,6 +5050,7 @@ fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 3); @@ -4947,7 +5067,19 @@ fn test_onchain_to_onchain_claim() { let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, 
nonce: 42}; connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}); check_added_monitors!(nodes[1], 1); - expect_payment_forwarded!(nodes[1], Some(1000), true); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => { + assert_eq!(fee_earned_msat, Some(1000)); + assert_eq!(claim_from_onchain_tx, true); + }, + _ => panic!("Unexpected event"), + } { let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelMonitor: claim tx @@ -4955,9 +5087,9 @@ fn test_onchain_to_onchain_claim() { check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager b_txn.clear(); } + check_added_monitors!(nodes[1], 1); let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - check_added_monitors!(nodes[1], 1); match msg_events[0] { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("Unexpected event"), @@ -4979,6 +5111,7 @@ fn test_onchain_to_onchain_claim() { // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); mine_transaction(&nodes[1], &commitment_tx[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx assert_eq!(b_txn.len(), 3); @@ -5036,6 +5169,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &commitment_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires let htlc_timeout_tx; @@ -5062,6 +5196,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { nodes[2].node.claim_funds(our_payment_preimage); mine_transaction(&nodes[2], &commitment_txn[0]); check_added_monitors!(nodes[2], 2); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed); let events = nodes[2].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. } => {}, @@ -5121,8 +5256,9 @@ fn test_duplicate_payment_hash_one_failure_one_success() { let events = nodes[0].node.get_and_clear_pending_events(); match events[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash } => { assert_eq!(*payment_preimage, our_payment_preimage); + assert_eq!(*payment_hash, duplicate_payment_hash); } _ => panic!("Unexpected event"), } @@ -5149,6 +5285,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { check_added_monitors!(nodes[1], 1); mine_transaction(&nodes[1], &local_txn[0]); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. 
} => {}, @@ -5319,9 +5456,26 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } else { mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]); } + let events = nodes[2].node.get_and_clear_pending_events(); + let close_event = if deliver_last_raa { + assert_eq!(events.len(), 2); + events[1].clone() + } else { + assert_eq!(events.len(), 1); + events[0].clone() + }; + match close_event { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + _ => panic!("Unexpected event"), + } + connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1); check_closed_broadcast!(nodes[2], true); - expect_pending_htlcs_forwardable!(nodes[2]); + if deliver_last_raa { + expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true); + } else { + expect_pending_htlcs_forwardable!(nodes[2]); + } check_added_monitors!(nodes[2], 3); let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events(); @@ -5370,7 +5524,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut as_failds = HashSet::new(); let mut as_updates = 0; for event in as_events.iter() { - if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event { + if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event { assert!(as_failds.insert(*payment_hash)); if *payment_hash != payment_hash_2 { assert_eq!(*rejected_by_dest, deliver_last_raa); @@ -5395,7 +5549,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut bs_failds = HashSet::new(); let mut bs_updates = 0; for event in bs_events.iter() { - if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. } = event { + if let &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, .. 
} = event { assert!(bs_failds.insert(*payment_hash)); if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 { assert_eq!(*rejected_by_dest, deliver_last_raa); @@ -5458,6 +5612,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { mine_transaction(&nodes[0], &local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires let htlc_timeout = { @@ -5541,6 +5696,7 @@ fn test_key_derivation_params() { connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -5581,6 +5737,7 @@ fn test_static_output_closing_tx() { let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; mine_transaction(&nodes[0], &closing_tx); + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -5588,6 +5745,7 @@ fn test_static_output_closing_tx() { check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -5602,7 +5760,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); + let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 }); // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being // present in B's local commitment transaction, but none of A's commitment transactions. 
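For reference, every closure reason asserted in this patch comes from the new ClosureReason enum imported from util::events in the header above. A minimal sketch of that enum, reconstructed solely from the variants these tests exercise (the canonical definition in util::events may carry additional variants and per-variant documentation):

    // Sketch reconstructed from the assertions in this patch, not copied from
    // util::events; variant and field names match the patterns matched above.
    pub enum ClosureReason {
        CounterpartyForceClosed { peer_msg: String }, // peer sent us an error message
        HolderForceClosed,                            // we called force_close_channel
        CooperativeClosure,                           // mutual close via close_channel
        CommitmentTxConfirmed,                        // a commitment transaction hit the chain
        ProcessingError { err: String },              // we force-closed on a protocol error
        DisconnectedPeer,                             // closed after the counterparty disconnected
        OutdatedChannelManager,                       // a stale ChannelManager was reloaded
    }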
@@ -5614,8 +5772,9 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentSent { payment_preimage } => { + Event::PaymentSent { payment_preimage, payment_hash } => { assert_eq!(payment_preimage, our_payment_preimage); + assert_eq!(payment_hash, our_payment_hash); }, _ => panic!("Unexpected event"), } @@ -5638,6 +5797,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { @@ -5670,6 +5830,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ -5718,6 +5879,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); } else { expect_payment_failed!(nodes[0], our_payment_hash, true); } @@ -5913,9 +6075,10 @@ fn test_fail_holding_cell_htlc_upon_free() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match &events[0] { - &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => { + &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => { assert_eq!(our_payment_hash.clone(), *payment_hash); assert_eq!(*rejected_by_dest, false); + assert_eq!(*all_paths_failed, true); assert_eq!(*network_update, None); assert_eq!(*error_code, None); assert_eq!(*error_data, None); @@ -5999,9 +6162,10 @@ fn test_free_and_fail_holding_cell_htlcs() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match &events[0] { - &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data } => { + &Event::PaymentPathFailed { ref payment_hash, ref rejected_by_dest, ref network_update, ref error_code, ref error_data, ref all_paths_failed, path: _ } => { assert_eq!(payment_hash_2.clone(), *payment_hash); assert_eq!(*rejected_by_dest, false); + assert_eq!(*all_paths_failed, true); assert_eq!(*network_update, None); assert_eq!(*error_code, None); assert_eq!(*error_data, None); @@ -6044,8 +6208,9 @@ fn test_free_and_fail_holding_cell_htlcs() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentSent { ref payment_preimage } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash } => { assert_eq!(*payment_preimage, payment_preimage_1); + assert_eq!(*payment_hash, payment_hash_1); } _ => panic!("Unexpected event"), } @@ -6255,6 +6420,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { 
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }); } #[test] @@ -6382,6 +6548,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6418,6 +6585,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6462,6 +6630,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6487,6 +6656,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6512,6 +6682,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6561,6 +6732,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6594,6 +6766,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6627,6 +6800,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been 
committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6660,6 +6834,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6701,6 +6876,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6742,6 +6918,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6790,6 +6967,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }); } #[test] @@ -6932,16 +7110,17 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let events = nodes[0].node.get_and_clear_pending_events(); - // Only 2 PaymentFailed events should show up, over-dust HTLC has to be failed by timeout tx + // Only 2 PaymentPathFailed events should show up, over-dust HTLC has to be failed by timeout tx assert_eq!(events.len(), 2); let mut first_failed = false; for event in events { match event { - Event::PaymentFailed { payment_hash, .. } => { + Event::PaymentPathFailed { payment_hash, .. 
} => { if payment_hash == payment_hash_1 { assert!(!first_failed); first_failed = true; @@ -6992,6 +7171,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { if local { // We fail dust-HTLC 1 by broadcast of local commitment tx mine_transaction(&nodes[0], &as_commitment_tx[0]); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], dust_hash, true); @@ -7011,6 +7191,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { mine_transaction(&nodes[0], &bs_commitment_tx[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone()); @@ -7029,14 +7210,14 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { assert_eq!(events.len(), 2); let first; match events[0] { - Event::PaymentFailed { payment_hash, .. } => { + Event::PaymentPathFailed { payment_hash, .. } => { if payment_hash == dust_hash { first = true; } else { first = false; } }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash, .. } => { + Event::PaymentPathFailed { payment_hash, .. } => { if first { assert_eq!(payment_hash, non_dust_hash); } else { assert_eq!(payment_hash, dust_hash); } }, @@ -7092,14 +7273,17 @@ fn test_user_configurable_csv_delay() { let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); accept_channel.to_self_delay = 200; nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + let reason_msg; if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] { match action { &ErrorAction::SendErrorMessage { ref msg } => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. 
Actual: \d+").unwrap().is_match(msg.data.as_str())); + reason_msg = msg.data.clone(); }, - _ => { assert!(false); } + _ => { panic!(); } } - } else { assert!(false); } + } else { panic!(); } + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }); // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req() nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); @@ -7212,10 +7396,10 @@ fn test_data_loss_protect() { // Check we close channel detecting A is fallen-behind nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() }); assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction"); check_added_monitors!(nodes[1], 1); - // Check A is able to claim to_remote output let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); @@ -7223,6 +7407,7 @@ fn test_data_loss_protect() { assert_eq!(node_txn[0].output.len(), 2); mine_transaction(&nodes[0], &node_txn[0]); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() }); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); @@ -7665,6 +7850,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -7686,7 +7872,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] }); let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] }); - expect_pending_htlcs_forwardable_ignore!(nodes[0]); + let events = nodes[0].node.get_and_clear_pending_events(); + expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); + match events[1] { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} + _ => panic!("Unexpected event"), + } let first; let feerate_1; let penalty_txn; @@ -7932,6 +8123,7 @@ fn test_counterparty_raa_skip_no_crash() { &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point }); assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack"); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }); } #[test] @@ -7964,6 +8156,7 @@ fn test_bump_txn_sanitize_tracking_maps() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); let penalty_txn = { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx @@ -8447,6 +8640,7 @@ fn test_pre_lockin_no_chan_closed_update() { let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }); assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Hi".to_string() }); } #[test] @@ -8481,6 +8675,7 @@ fn test_htlc_no_detection() { chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); let htlc_timeout = { @@ -8540,6 +8735,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap(); check_closed_broadcast!(nodes[force_closing_node], true); check_added_monitors!(nodes[force_closing_node], 1); + check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed); if go_onchain_before_fulfill { let txn_to_broadcast = match broadcast_alice { true => alice_txn.clone(), @@ -8551,6 +8747,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); } assert_eq!(bob_txn.len(), 1); check_spends!(bob_txn[0], chan_ab.3); @@ -8631,6 +8828,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); } let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); if broadcast_alice { @@ -8846,6 +9044,7 @@ fn test_error_chans_closed() { nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], false); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() }); 
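+ // The error message above names chan_2's channel_id explicitly, so only that channel should be force-closed: the assertions below check that exactly one transaction is broadcast and that two channels (including chan_1) remain usable.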
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); assert_eq!(nodes[0].node.list_usable_channels().len(), 2); assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2); @@ -8855,6 +9054,7 @@ fn test_error_chans_closed() { let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known()); nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() }); check_added_monitors!(nodes[0], 2); + check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: "ERR".to_string() }); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); match events[0] { @@ -8919,6 +9119,7 @@ fn test_invalid_funding_tx() { nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); confirm_transaction_at(&nodes[1], &tx, 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); check_added_monitors!(nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -8962,6 +9163,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t nodes[1].node.force_close_channel(&channel_id).unwrap(); check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); check_added_monitors!(nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -9014,6 +9216,125 @@ fn test_tx_confirmed_skipping_blocks_immediate_broadcast() { do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true); } +#[test] +fn test_forwardable_regen() { + // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the + // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive + // HTLCs. + // We test it for both payment receipt and payment forwarding. 
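+ // Concretely: one HTLC is sent to nodes[1] for receipt and a second is routed through nodes[1] toward nodes[2]; both are left pending, nodes[1]'s ChannelManager and channel monitors are serialized and reloaded, and the test then verifies that a single PendingHTLCsForwardable event is regenerated, after which both the received and the forwarded payment complete normally.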
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let persister: test_utils::TestPersister; + let new_chain_monitor: test_utils::TestChainMonitor; + let nodes_1_deserialized: ChannelManager; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + + // First send a payment to nodes[1] + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + expect_pending_htlcs_forwardable_ignore!(nodes[1]); + + // Next send a payment which is forwarded by nodes[1] + let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000); + nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + // There is already a PendingHTLCsForwardable event "pending" so another one will not be + // generated + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + + let nodes_1_serialized = nodes[1].node.encode(); + let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); + let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new()); + { + let monitors = nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap(); + let mut monitor_iter = monitors.iter(); + monitor_iter.next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + monitor_iter.next().unwrap().1.write(&mut chan_1_monitor_serialized).unwrap(); + } + + persister = test_utils::TestPersister::new(); + let keys_manager = &chanmon_cfgs[1].keys_manager; + new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager); + nodes[1].chain_monitor = &new_chain_monitor; + + let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; + let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read( + &mut chan_0_monitor_read, keys_manager).unwrap(); + assert!(chan_0_monitor_read.is_empty()); + let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..]; + let (_, mut chan_1_monitor) = <(BlockHash, 
ChannelMonitor)>::read( + &mut chan_1_monitor_read, keys_manager).unwrap(); + assert!(chan_1_monitor_read.is_empty()); + + let mut nodes_1_read = &nodes_1_serialized[..]; + let (_, nodes_1_deserialized_tmp) = { + let mut channel_monitors = HashMap::new(); + channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); + channel_monitors.insert(chan_1_monitor.get_funding_txo().0, &mut chan_1_monitor); + <(BlockHash, ChannelManager)>::read(&mut nodes_1_read, ChannelManagerReadArgs { + default_config: UserConfig::default(), + keys_manager, + fee_estimator: node_cfgs[1].fee_estimator, + chain_monitor: nodes[1].chain_monitor, + tx_broadcaster: nodes[1].tx_broadcaster.clone(), + logger: nodes[1].logger, + channel_monitors, + }).unwrap() + }; + nodes_1_deserialized = nodes_1_deserialized_tmp; + assert!(nodes_1_read.is_empty()); + + assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); + assert!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor).is_ok()); + nodes[1].node = &nodes_1_deserialized; + check_added_monitors!(nodes[1], 2); + + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + // Note that nodes[1] and nodes[2] resend their funding_locked here since they haven't updated + // the commitment state. + reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000); + check_added_monitors!(nodes[1], 1); + + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[2]); + expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000); + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); +} + #[test] fn test_keysend_payments_to_public_node() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -9030,7 +9351,7 @@ fn test_keysend_payments_to_public_node() { nodes[0].logger).unwrap(); let test_preimage = PaymentPreimage([42; 32]); - let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); + let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -9060,7 +9381,7 @@ fn test_keysend_payments_to_private_node() { nodes[0].logger).unwrap(); let test_preimage = PaymentPreimage([42; 32]); - let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); + let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1);
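For reference, a minimal sketch of the assertion pattern that the `check_closed_event!` calls added throughout this diff are expected to perform: drain the node's pending events and require exactly the given number of `Event::ChannelClosed` entries carrying the expected `ClosureReason`. The helper name `assert_closed_events`, its free-function form, and the Debug-string comparison (used so the sketch does not assume `ClosureReason` implements `PartialEq`) are illustrative assumptions; the real macro in the test utilities may differ.

use util::events::{ClosureReason, Event};

/// Illustrative stand-in for `check_closed_event!`: asserts that `events` contains exactly
/// `expected_count` `Event::ChannelClosed` entries, each carrying the expected `ClosureReason`.
fn assert_closed_events(events: &[Event], expected_count: usize, expected_reason: &ClosureReason) {
	assert_eq!(events.len(), expected_count);
	for event in events {
		match event {
			Event::ChannelClosed { reason, .. } => {
				// Compare via Debug output to avoid assuming a PartialEq impl on ClosureReason.
				assert_eq!(format!("{:?}", reason), format!("{:?}", expected_reason));
			},
			_ => panic!("Unexpected event"),
		}
	}
}

// Example (hypothetical) call site, mirroring the cooperative-close checks above:
// assert_closed_events(&nodes[0].node.get_and_clear_pending_events(), 1, &ClosureReason::CooperativeClosure);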