X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchanmon_update_fail_tests.rs;h=4c85040fd0adedc62fbfd03b73d65acc357d38d8;hb=d32052fbf6c7b69379ad839a8d55aff318df6391;hp=fdcf7c2de70ee71edb5bd8e3d2ff4dfdafa67606;hpb=94528f00f57a9ce19fe94d0d4b938cafa3bbe697;p=rust-lightning diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index fdcf7c2d..4c85040f 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -28,7 +28,7 @@ use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler}; use routing::router::get_route; use util::config::UserConfig; use util::enforcing_trait_impls::EnforcingSigner; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; use util::errors::APIError; use util::ser::{ReadableArgs, Writeable}; use util::test_utils::TestBroadcaster; @@ -41,7 +41,7 @@ use ln::functional_test_utils::*; use util::test_utils; use prelude::*; -use std::sync::{Arc, Mutex}; +use sync::{Arc, Mutex}; // If persister_fail is true, we have the persister return a PermanentFailure // instead of the higher-level ChainMonitor. @@ -220,11 +220,16 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail let events_3 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { assert_eq!(payment_hash_1, *payment_hash); - assert!(payment_preimage.is_none()); - assert_eq!(payment_secret_1, *payment_secret); assert_eq!(amt, 1000000); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_1, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } }, _ => panic!("Unexpected event"), } @@ -589,11 +594,16 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { - Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { assert_eq!(payment_hash_2, *payment_hash); - assert!(payment_preimage.is_none()); - assert_eq!(payment_secret_2, *payment_secret); assert_eq!(amt, 1000000); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. 
} => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_2, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } }, _ => panic!("Unexpected event"), } @@ -704,11 +714,16 @@ fn test_monitor_update_fail_cs() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { payment_hash, payment_preimage, payment_secret, amt, user_payment_id: _ } => { + Event::PaymentReceived { payment_hash, ref purpose, amt } => { assert_eq!(payment_hash, our_payment_hash); - assert!(payment_preimage.is_none()); - assert_eq!(our_payment_secret, payment_secret); assert_eq!(amt, 1000000); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(our_payment_secret, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } }, _ => panic!("Unexpected event"), }; @@ -905,8 +920,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - // Attempt to forward a third payment but fail due to the second channel being unavailable - // for forwarding. + // Forward a third payment which will also be added to the holding cell, despite the channel + // being paused waiting a monitor update. let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]); { let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; @@ -921,39 +936,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); check_added_monitors!(nodes[1], 0); - let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_2.len(), 1); - match events_2.remove(0) { - MessageSendEvent::UpdateHTLCs { node_id, updates } => { - assert_eq!(node_id, nodes[0].node.get_our_node_id()); - assert!(updates.update_fulfill_htlcs.is_empty()); - assert_eq!(updates.update_fail_htlcs.len(), 1); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - - nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true); - - let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - match msg_events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id); - assert_eq!(msg.contents.flags & 2, 2); // temp disabled - }, - _ => panic!("Unexpected event"), - } - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] { - assert_eq!(payment_hash, payment_hash_3); - assert!(!rejected_by_dest); - } else { panic!("Unexpected event!"); } - }, - _ => panic!("Unexpected event type!"), - }; + // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell + // and not forwarded. 
+ expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { // Try to route another payment backwards from 2 to make sure 1 holds off on responding @@ -970,7 +957,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) } else { (None, None) }; @@ -1020,14 +1006,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0); commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false); - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 1); - if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] { - assert_eq!(payment_hash, payment_hash_1); - assert!(rejected_by_dest); - } else { panic!("Unexpected event!"); } + expect_payment_failed!(nodes[0], payment_hash_1, true); nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]); + let as_cs; if test_ignore_second_cs { nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); @@ -1043,40 +1025,83 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - assert!(as_cs.update_add_htlcs.is_empty()); - assert!(as_cs.update_fail_htlcs.is_empty()); - assert!(as_cs.update_fail_malformed_htlcs.is_empty()); - assert!(as_cs.update_fulfill_htlcs.is_empty()); - assert!(as_cs.update_fee.is_none()); + as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[1], 1); - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - - nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); + } else { + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); - let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[2], 1); - assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_revoke_and_commit.len(), 2); + match bs_revoke_and_commit[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg); + check_added_monitors!(nodes[1], 1); + }, + _ => 
panic!("Unexpected event"), + } - nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - } else { - commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false); + as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + + match bs_revoke_and_commit[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed); + check_added_monitors!(nodes[1], 1); + }, + _ => panic!("Unexpected event"), + } } + assert_eq!(as_cs.update_add_htlcs.len(), 1); + assert!(as_cs.update_fail_htlcs.is_empty()); + assert!(as_cs.update_fail_malformed_htlcs.is_empty()); + assert!(as_cs.update_fulfill_htlcs.is_empty()); + assert!(as_cs.update_fee.is_none()); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + + + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]); + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); + check_added_monitors!(nodes[2], 1); + let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); + check_added_monitors!(nodes[2], 1); + let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed); + check_added_monitors!(nodes[1], 1); + let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa); + check_added_monitors!(nodes[2], 1); + assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + expect_pending_htlcs_forwardable!(nodes[2]); let events_6 = nodes[2].node.get_and_clear_pending_events(); - assert_eq!(events_6.len(), 1); + assert_eq!(events_6.len(), 2); match events_6[0] { Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); }, _ => panic!("Unexpected event"), }; + match events_6[1] { + Event::PaymentReceived { payment_hash, .. 
} => { assert_eq!(payment_hash, payment_hash_3); }, + _ => panic!("Unexpected event"), + }; if test_ignore_second_cs { expect_pending_htlcs_forwardable!(nodes[1]); @@ -1148,7 +1173,10 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!( + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); @@ -1162,10 +1190,15 @@ fn test_monitor_update_fail_reestablish() { assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id())); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); + assert_eq!( + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); check_added_monitors!(nodes[1], 0); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!( + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); @@ -1342,14 +1375,14 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. 
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1482,7 +1515,9 @@ fn monitor_failed_no_reestablish_response() { let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); @@ -1611,9 +1646,9 @@ fn first_message_on_recv_ordering() { fn test_monitor_update_fail_claim() { // Basic test for monitor update failures when processing claim_funds calls. // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor - // update to claim the payment. We then send a payment C->B->A, making the forward of this - // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor - // updating and claim the payment on B. + // update to claim the payment. We then send two payments C->B->A, which are held at B. + // Finally, we restore the channel monitor updating and claim the payment on B, forwarding + // the payments from C onwards to A. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); @@ -1629,12 +1664,19 @@ fn test_monitor_update_fail_claim() { *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); + // Note that at this point there is a pending commitment transaction update for A being held by + // B. Even when we go to send the payment from C through B to A, B will not update this + // already-signed commitment transaction and will instead wait for it to resolve before + // forwarding the payment onwards. 
+ let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]); + let route; { let net_graph_msg_handler = &nodes[2].net_graph_msg_handler; - let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[2], 1); } @@ -1649,29 +1691,19 @@ fn test_monitor_update_fail_claim() { nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); - nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true); - - let msg_events = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - match msg_events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); - assert_eq!(msg.contents.flags & 2, 2); // temp disabled - }, - _ => panic!("Unexpected event"), - } + let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); + nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap(); + check_added_monitors!(nodes[2], 1); - let events = nodes[2].node.get_and_clear_pending_events(); + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] { - assert_eq!(payment_hash, payment_hash_2); - assert!(!rejected_by_dest); - } else { panic!("Unexpected event!"); } + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0); + commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); @@ -1681,12 +1713,47 @@ fn test_monitor_update_fail_claim() { let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); + expect_payment_sent!(nodes[0], payment_preimage_1); + + // Get the payment forwards, note that they were batched into one commitment update. + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]); + commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[0]); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let Event::PaymentSent { payment_preimage, .. } = events[0] { - assert_eq!(payment_preimage, payment_preimage_1); - } else { panic!("Unexpected event!"); } + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + assert_eq!(payment_hash_2, *payment_hash); + assert_eq!(1_000_000, amt); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_2, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + assert_eq!(payment_hash_3, *payment_hash); + assert_eq!(1_000_000, amt); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_3, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } } #[test] @@ -1972,7 +2039,7 @@ fn test_path_paused_mpp() { // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), payment_secret, events.pop().unwrap(), false); + pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), payment_secret, events.pop().unwrap(), false, None); // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. 
@@ -1980,7 +2047,7 @@ fn test_path_paused_mpp() { nodes[0].node.channel_monitor_updated(&outpoint, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), payment_secret, events.pop().unwrap(), true); + pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), payment_secret, events.pop().unwrap(), true, None); claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage); }
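
Illustrative sketch (not part of the patch above): the hunks repeatedly adapt test assertions to the reworked Event::PaymentReceived, which now carries a purpose: PaymentPurpose field in place of the old top-level payment_preimage / payment_secret / user_payment_id fields. A minimal consumer-side version of the matching pattern the updated tests use might look like the following; the function name is hypothetical and the import path assumes the external crate path lightning::util::events used by this release line.

use lightning::util::events::{Event, PaymentPurpose};

// Checks that a received payment is an as-yet-unclaimed invoice payment, mirroring
// the assertion pattern used throughout the updated tests above.
fn assert_unclaimed_invoice_payment(event: &Event) {
	match event {
		Event::PaymentReceived { payment_hash: _, purpose, amt: _ } => {
			match purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret: _, .. } => {
					// The preimage stays None until the payment is claimed; the secret
					// (ignored here) identifies which invoice was paid.
					assert!(payment_preimage.is_none());
				},
				// Other purposes (e.g. spontaneous/keysend payments) would be handled
				// here; the tests above simply panic! on anything but InvoicePayment.
				_ => panic!("expected PaymentPurpose::InvoicePayment"),
			}
		},
		_ => panic!("expected Event::PaymentReceived"),
	}
}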