diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 7773ffea..90519f28 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -13,6 +13,7 @@
 //! here. See also the chanmon_fail_consistency fuzz test.
 use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr};
@@ -27,9 +28,10 @@ use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
 use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 use util::errors::APIError;
 use util::ser::{ReadableArgs, Writeable};
+use util::test_utils::TestBroadcaster;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
@@ -39,7 +41,7 @@ use ln::functional_test_utils::*;
 use util::test_utils;
 use prelude::*;
-use std::collections::HashMap;
+use sync::{Arc, Mutex};
 // If persister_fail is true, we have the persister return a PermanentFailure
 // instead of the higher-level ChainMonitor.
@@ -107,6 +109,13 @@ fn test_monitor_and_persister_update_fail() {
 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
 let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
 let persister = test_utils::TestPersister::new();
+ let tx_broadcaster = TestBroadcaster {
+ txn_broadcasted: Mutex::new(Vec::new()),
+ // Because we will connect a block at height 200 below, we need the TestBroadcaster to know
+ // that we are at height 200 so that it doesn't think we're violating the time lock
+ // requirements of transactions broadcasted at that point.
+ blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 200); 200])),
+ };
 let chain_mon = {
 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
 let monitor = monitors.get(&outpoint).unwrap();
@@ -115,7 +124,7 @@
 let new_monitor = <(BlockHash, ChannelMonitor)>::read(
 &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
 assert!(new_monitor == *monitor);
- let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
+ let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
 chain_mon
 };
@@ -188,7 +197,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 if disconnect {
 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 }
 match persister_fail {
@@ -211,11 +220,16 @@
 let events_3 = nodes[1].node.get_and_clear_pending_events();
 assert_eq!(events_3.len(), 1);
 match events_3[0] {
- Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
 assert_eq!(payment_hash_1, *payment_hash);
- assert!(payment_preimage.is_none());
- assert_eq!(payment_secret_1, *payment_secret);
 assert_eq!(amt, 1000000);
+ match &purpose {
+ PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ assert!(payment_preimage.is_none());
+ assert_eq!(payment_secret_1, *payment_secret);
+ },
+ _ => panic!("expected PaymentPurpose::InvoicePayment")
+ }
 },
 _ => panic!("Unexpected event"),
 }
@@ -242,7 +256,7 @@
 if disconnect {
 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 }
 // ...and make sure we can force-close a frozen channel
@@ -580,11 +594,16 @@
 let events_5 = nodes[1].node.get_and_clear_pending_events();
 assert_eq!(events_5.len(), 1);
 match events_5[0] {
- Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+ Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
 assert_eq!(payment_hash_2, *payment_hash);
- assert!(payment_preimage.is_none());
- assert_eq!(payment_secret_2, *payment_secret);
 assert_eq!(amt, 1000000);
+ match &purpose {
+ PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ assert!(payment_preimage.is_none());
+ assert_eq!(payment_secret_2, *payment_secret);
+ },
+ _ => panic!("expected PaymentPurpose::InvoicePayment")
+ }
 },
 _ => panic!("Unexpected event"),
 }
@@ -695,11 +714,16 @@ fn test_monitor_update_fail_cs() {
 let events = nodes[1].node.get_and_clear_pending_events();
 assert_eq!(events.len(), 1);
 match events[0] {
- Event::PaymentReceived { payment_hash, payment_preimage, payment_secret, amt, user_payment_id: _ } => {
+ Event::PaymentReceived { payment_hash, ref purpose, amt } => {
 assert_eq!(payment_hash, our_payment_hash);
- assert!(payment_preimage.is_none());
- assert_eq!(our_payment_secret, payment_secret);
 assert_eq!(amt, 1000000);
+ match &purpose {
+ PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ assert!(payment_preimage.is_none());
+ assert_eq!(our_payment_secret, *payment_secret);
+ },
+ _ => panic!("expected PaymentPurpose::InvoicePayment")
+ }
 },
 _ => panic!("Unexpected event"),
 };
@@ -896,8 +920,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[1], 1);
- // Attempt to forward a third payment but fail due to the second channel being unavailable
- // for forwarding.
+ // Forward a third payment which will also be added to the holding cell, despite the channel
+ // being paused waiting on a monitor update.
 let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]);
 {
 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
@@ -912,39 +936,11 @@
 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
 check_added_monitors!(nodes[1], 0);
- let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events_2.len(), 1);
- match events_2.remove(0) {
- MessageSendEvent::UpdateHTLCs { node_id, updates } => {
- assert_eq!(node_id, nodes[0].node.get_our_node_id());
- assert!(updates.update_fulfill_htlcs.is_empty());
- assert_eq!(updates.update_fail_htlcs.len(), 1);
- assert!(updates.update_fail_malformed_htlcs.is_empty());
- assert!(updates.update_add_htlcs.is_empty());
- assert!(updates.update_fee.is_none());
-
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
-
- let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(msg_events.len(), 1);
- match msg_events[0] {
- MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
- assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
- assert_eq!(msg.contents.flags & 2, 2); // temp disabled
- },
- _ => panic!("Unexpected event"),
- }
-
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
- assert_eq!(payment_hash, payment_hash_3);
- assert!(!rejected_by_dest);
- } else { panic!("Unexpected event!"); }
- },
- _ => panic!("Unexpected event type!"),
- };
+ // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
+ // and not forwarded.
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 0);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
@@ -961,7 +957,6 @@
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 (Some(payment_preimage_4), Some(payment_hash_4))
 } else { (None, None) };
@@ -1011,14 +1006,10 @@
 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
 commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
- let events_4 = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events_4.len(), 1);
- if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
- assert_eq!(payment_hash, payment_hash_1);
- assert!(rejected_by_dest);
- } else { panic!("Unexpected event!"); }
+ expect_payment_failed!(nodes[0], payment_hash_1, true);
 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
+ let as_cs;
 if test_ignore_second_cs {
 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
 check_added_monitors!(nodes[2], 1);
@@ -1034,40 +1025,83 @@
 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
 check_added_monitors!(nodes[1], 1);
- let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
- assert!(as_cs.update_add_htlcs.is_empty());
- assert!(as_cs.update_fail_htlcs.is_empty());
- assert!(as_cs.update_fail_malformed_htlcs.is_empty());
- assert!(as_cs.update_fulfill_htlcs.is_empty());
- assert!(as_cs.update_fee.is_none());
+ as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
 check_added_monitors!(nodes[1], 1);
- let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
-
- nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
+ } else {
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
 check_added_monitors!(nodes[2], 1);
- let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
- nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
- check_added_monitors!(nodes[2], 1);
- assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+ let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_revoke_and_commit.len(), 2);
+ match bs_revoke_and_commit[0] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
+ check_added_monitors!(nodes[1], 1);
+ },
+ _ => panic!("Unexpected event"),
 }
- nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
- check_added_monitors!(nodes[1], 1);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- } else {
- commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
+ as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+ match bs_revoke_and_commit[1] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
 }
+ assert_eq!(as_cs.update_add_htlcs.len(), 1);
+ assert!(as_cs.update_fail_htlcs.is_empty());
+ assert!(as_cs.update_fail_malformed_htlcs.is_empty());
+ assert!(as_cs.update_fulfill_htlcs.is_empty());
+ assert!(as_cs.update_fee.is_none());
+ let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
+ check_added_monitors!(nodes[2], 1);
+ let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
+ check_added_monitors!(nodes[2], 1);
+ let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
+ check_added_monitors!(nodes[2], 1);
+ assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
 expect_pending_htlcs_forwardable!(nodes[2]);
 let events_6 = nodes[2].node.get_and_clear_pending_events();
- assert_eq!(events_6.len(), 1);
+ assert_eq!(events_6.len(), 2);
 match events_6[0] {
 Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
 _ => panic!("Unexpected event"),
 };
+ match events_6[1] {
+ Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
+ _ => panic!("Unexpected event"),
+ };
 if test_ignore_second_cs {
 expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1139,7 +1173,10 @@ fn test_monitor_update_fail_reestablish() {
 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert_eq!(
+ get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+ .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
+
 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[1], 1);
@@ -1153,10 +1190,15 @@
 assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+ assert_eq!(
+ get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+ .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
 check_added_monitors!(nodes[1], 0);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert_eq!(
+ get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
+ .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
 let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1333,14 +1375,14 @@ fn claim_while_disconnected_monitor_update_fail() {
 let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
 // update.
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1473,7 +1515,9 @@ fn monitor_failed_no_reestablish_response() { let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); @@ -1602,9 +1646,9 @@ fn first_message_on_recv_ordering() { fn test_monitor_update_fail_claim() { // Basic test for monitor update failures when processing claim_funds calls. // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor - // update to claim the payment. We then send a payment C->B->A, making the forward of this - // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor - // updating and claim the payment on B. + // update to claim the payment. We then send two payments C->B->A, which are held at B. + // Finally, we restore the channel monitor updating and claim the payment on B, forwarding + // the payments from C onwards to A. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); @@ -1620,12 +1664,19 @@ fn test_monitor_update_fail_claim() { *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); + // Note that at this point there is a pending commitment transaction update for A being held by + // B. Even when we go to send the payment from C through B to A, B will not update this + // already-signed commitment transaction and will instead wait for it to resolve before + // forwarding the payment onwards. 
+ let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]); + let route; { let net_graph_msg_handler = &nodes[2].net_graph_msg_handler; - let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[2], 1); } @@ -1640,29 +1691,19 @@ fn test_monitor_update_fail_claim() { nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); - nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true); - - let msg_events = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - match msg_events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); - assert_eq!(msg.contents.flags & 2, 2); // temp disabled - }, - _ => panic!("Unexpected event"), - } + let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); + nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap(); + check_added_monitors!(nodes[2], 1); - let events = nodes[2].node.get_and_clear_pending_events(); + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] { - assert_eq!(payment_hash, payment_hash_2); - assert!(!rejected_by_dest); - } else { panic!("Unexpected event!"); } + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0); + commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); @@ -1672,12 +1713,47 @@ fn test_monitor_update_fail_claim() { let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); + expect_payment_sent!(nodes[0], payment_preimage_1); + + // Get the payment forwards, note that they were batched into one commitment update. + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]); + commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[0]); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let Event::PaymentSent { payment_preimage, .. } = events[0] { - assert_eq!(payment_preimage, payment_preimage_1); - } else { panic!("Unexpected event!"); } + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + assert_eq!(payment_hash_2, *payment_hash); + assert_eq!(1_000_000, amt); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_2, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + assert_eq!(payment_hash_3, *payment_hash); + assert_eq!(1_000_000, amt); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_3, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } } #[test] @@ -1871,7 +1947,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1963,7 +2039,7 @@ fn test_path_paused_mpp() { // Pass the first HTLC of the payment along to nodes[3]. 
let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), payment_secret, events.pop().unwrap(), false); + pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None); // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. @@ -1971,7 +2047,7 @@ fn test_path_paused_mpp() { nodes[0].node.channel_monitor_updated(&outpoint, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), payment_secret, events.pop().unwrap(), true); + pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage); } @@ -2174,3 +2250,119 @@ fn channel_holding_cell_serialize() { do_channel_holding_cell_serialize(true, false); do_channel_holding_cell_serialize(false, true); // last arg doesn't matter } + +#[derive(PartialEq)] +enum HTLCStatusAtDupClaim { + Received, + HoldingCell, + Cleared, +} +fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) { + // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards + // along the payment path before waiting for a full commitment_signed dance. This is great, but + // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects, + // reconnects, and then has to re-send its update_fulfill_htlc message again. + // In previous code, we didn't handle the double-claim correctly, spuriously closing the + // channel on which the inbound HTLC was received. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2; + + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000); + + let mut as_raa = None; + if htlc_status == HTLCStatusAtDupClaim::HoldingCell { + // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be + // awaiting a remote revoke_and_ack from nodes[0]. 
+ let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
+ let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
+ &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+ nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+ check_added_monitors!(nodes[1], 1);
+
+ let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+ check_added_monitors!(nodes[0], 1);
+
+ as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+ }
+
+ let fulfill_msg = msgs::UpdateFulfillHTLC {
+ channel_id: chan_2,
+ htlc_id: 0,
+ payment_preimage,
+ };
+ if second_fails {
+ assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+ get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ } else {
+ assert!(nodes[2].node.claim_funds(payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
+ // Check that the message we're about to deliver matches the one generated:
+ assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
+ }
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut bs_updates = None;
+ if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
+ bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+ assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+ expect_payment_sent!(nodes[0], payment_preimage);
+ if htlc_status == HTLCStatusAtDupClaim::Cleared {
+ commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+ }
+ } else {
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+ if second_fails {
+ reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ } else {
+ reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+
+ bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+ assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+ expect_payment_sent!(nodes[0], payment_preimage);
+ }
+ if htlc_status != HTLCStatusAtDupClaim::Cleared {
+ commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+ }
+}
+
+#[test]
+fn test_reconnect_dup_htlc_claims() {
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
+}
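The `Event::PaymentReceived` checks this patch rewrites all share one shape: destructure the event, check the hash and amount, then require a `PaymentPurpose::InvoicePayment` whose preimage is still unset and whose secret matches. As a minimal sketch, not part of the patch itself, the pattern could be factored into a test helper; `expect_invoice_payment` is a hypothetical name, and the import paths for `PaymentHash`/`PaymentSecret` are assumed for this revision of the crate.

use ln::{PaymentHash, PaymentSecret}; // assumed import path
use util::events::{Event, PaymentPurpose};

// Hypothetical helper mirroring the match arms added throughout this patch.
fn expect_invoice_payment(event: &Event, expected_hash: PaymentHash, expected_secret: PaymentSecret, expected_amt: u64) {
	match event {
		Event::PaymentReceived { payment_hash, purpose, amt } => {
			assert_eq!(*payment_hash, expected_hash);
			assert_eq!(*amt, expected_amt);
			match purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					// The recipient does not learn the preimage until it claims the payment.
					assert!(payment_preimage.is_none());
					assert_eq!(*payment_secret, expected_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment"),
			}
		},
		_ => panic!("Unexpected event"),
	}
}

With such a helper, each of the events_3/events_5-style blocks above would reduce to a single call, e.g. expect_invoice_payment(&events_3[0], payment_hash_1, payment_secret_1, 1000000).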