diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 619d02c8..3d958292 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -13,21 +13,25 @@
 //! here. See also the chanmon_fail_consistency fuzz test.
 
 use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr};
 use chain::transaction::OutPoint;
 use chain::Listen;
 use chain::Watch;
-use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSendFailure};
+use ln::{PaymentPreimage, PaymentHash};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
+use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 use util::errors::APIError;
 use util::ser::{ReadableArgs, Writeable};
+use util::test_utils::TestBroadcaster;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
@@ -36,6 +40,9 @@
 use ln::functional_test_utils::*;
 use util::test_utils;
 
+use prelude::*;
+use sync::{Arc, Mutex};
+
 // If persister_fail is true, we have the persister return a PermanentFailure
 // instead of the higher-level ChainMonitor.
 fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
@@ -102,6 +109,13 @@ fn test_monitor_and_persister_update_fail() {
 	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
 	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
 	let persister = test_utils::TestPersister::new();
+	let tx_broadcaster = TestBroadcaster {
+		txn_broadcasted: Mutex::new(Vec::new()),
+		// Because we will connect a block at height 200 below, we need the TestBroadcaster to know
+		// that we are at height 200 so that it doesn't think we're violating the time lock
+		// requirements of transactions broadcasted at that point.
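+		// We model that height by pre-filling `blocks` with 200 placeholder entries, one for
+		// each block of assumed chain history.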
+ blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 200); 200])), + }; let chain_mon = { let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap(); let monitor = monitors.get(&outpoint).unwrap(); @@ -110,7 +124,7 @@ fn test_monitor_and_persister_update_fail() { let new_monitor = <(BlockHash, ChannelMonitor)>::read( &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1; assert!(new_monitor == *monitor); - let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); + let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok()); chain_mon }; @@ -206,10 +220,16 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail let events_3 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentReceived { ref payment_hash, ref payment_secret, amt, user_payment_id: _ } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { assert_eq!(payment_hash_1, *payment_hash); - assert_eq!(Some(payment_secret_1), *payment_secret); assert_eq!(amt, 1000000); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_1, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } }, _ => panic!("Unexpected event"), } @@ -574,10 +594,16 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { - Event::PaymentReceived { ref payment_hash, ref payment_secret, amt, user_payment_id: _ } => { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { assert_eq!(payment_hash_2, *payment_hash); - assert_eq!(Some(payment_secret_2), *payment_secret); assert_eq!(amt, 1000000); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_2, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } }, _ => panic!("Unexpected event"), } @@ -688,10 +714,16 @@ fn test_monitor_update_fail_cs() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentReceived { payment_hash, payment_secret, amt, user_payment_id: _ } => { + Event::PaymentReceived { payment_hash, ref purpose, amt } => { assert_eq!(payment_hash, our_payment_hash); - assert_eq!(Some(our_payment_secret), payment_secret); assert_eq!(amt, 1000000); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(our_payment_secret, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } }, _ => panic!("Unexpected event"), }; @@ -888,8 +920,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - // Attempt to forward a third payment but fail due to the second channel being unavailable - // for forwarding. 
+	// Forward a third payment which will also be added to the holding cell, despite the channel
+	// being paused while waiting on a monitor update.
 	let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
@@ -904,39 +936,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
 	check_added_monitors!(nodes[1], 0);
 
-	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
-	assert_eq!(events_2.len(), 1);
-	match events_2.remove(0) {
-		MessageSendEvent::UpdateHTLCs { node_id, updates } => {
-			assert_eq!(node_id, nodes[0].node.get_our_node_id());
-			assert!(updates.update_fulfill_htlcs.is_empty());
-			assert_eq!(updates.update_fail_htlcs.len(), 1);
-			assert!(updates.update_fail_malformed_htlcs.is_empty());
-			assert!(updates.update_add_htlcs.is_empty());
-			assert!(updates.update_fee.is_none());
-
-			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
-			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
-
-			let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
-			assert_eq!(msg_events.len(), 1);
-			match msg_events[0] {
-				MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
-					assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
-					assert_eq!(msg.contents.flags & 2, 2); // temp disabled
-				},
-				_ => panic!("Unexpected event"),
-			}
-
-			let events = nodes[0].node.get_and_clear_pending_events();
-			assert_eq!(events.len(), 1);
-			if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
-				assert_eq!(payment_hash, payment_hash_3);
-				assert!(!rejected_by_dest);
-			} else { panic!("Unexpected event!"); }
-		},
-		_ => panic!("Unexpected event type!"),
-	};
+	// Process the pending HTLC forwards and check that the new HTLC was simply added to the
+	// holding cell and not forwarded.
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 0);
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
 	let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
 		// Try to route another payment backwards from 2 to make sure 1 holds off on responding
@@ -953,7 +957,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 		(Some(payment_preimage_4), Some(payment_hash_4))
 	} else { (None, None) };
@@ -1003,14 +1006,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
 	commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
 
-	let events_4 = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events_4.len(), 1);
-	if let Event::PaymentFailed { payment_hash, rejected_by_dest, ..
} = events_4[0] { - assert_eq!(payment_hash, payment_hash_1); - assert!(rejected_by_dest); - } else { panic!("Unexpected event!"); } + expect_payment_failed!(nodes[0], payment_hash_1, true); nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]); + let as_cs; if test_ignore_second_cs { nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); @@ -1026,40 +1025,83 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - assert!(as_cs.update_add_htlcs.is_empty()); - assert!(as_cs.update_fail_htlcs.is_empty()); - assert!(as_cs.update_fail_malformed_htlcs.is_empty()); - assert!(as_cs.update_fulfill_htlcs.is_empty()); - assert!(as_cs.update_fee.is_none()); + as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[1], 1); - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - - nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); + } else { + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); - let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[2], 1); - assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_revoke_and_commit.len(), 2); + match bs_revoke_and_commit[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg); + check_added_monitors!(nodes[1], 1); + }, + _ => panic!("Unexpected event"), + } - nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - } else { - commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false); + as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + + match bs_revoke_and_commit[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed); + check_added_monitors!(nodes[1], 1); + }, + _ => panic!("Unexpected event"), + } } + assert_eq!(as_cs.update_add_htlcs.len(), 1); + assert!(as_cs.update_fail_htlcs.is_empty()); + assert!(as_cs.update_fail_malformed_htlcs.is_empty()); + assert!(as_cs.update_fulfill_htlcs.is_empty()); + assert!(as_cs.update_fee.is_none()); + let as_raa = 
get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + + + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]); + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); + check_added_monitors!(nodes[2], 1); + let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa); + check_added_monitors!(nodes[2], 1); + let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed); + check_added_monitors!(nodes[1], 1); + let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa); + check_added_monitors!(nodes[2], 1); + assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + expect_pending_htlcs_forwardable!(nodes[2]); let events_6 = nodes[2].node.get_and_clear_pending_events(); - assert_eq!(events_6.len(), 1); + assert_eq!(events_6.len(), 2); match events_6[0] { Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); }, _ => panic!("Unexpected event"), }; + match events_6[1] { + Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); }, + _ => panic!("Unexpected event"), + }; if test_ignore_second_cs { expect_pending_htlcs_forwardable!(nodes[1]); @@ -1131,7 +1173,10 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!( + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); @@ -1145,10 +1190,15 @@ fn test_monitor_update_fail_reestablish() { assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id())); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish); + assert_eq!( + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish); check_added_monitors!(nodes[1], 0); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!( + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) + .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); let (outpoint, 
latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); @@ -1325,14 +1375,14 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1465,7 +1515,9 @@ fn monitor_failed_no_reestablish_response() { let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); + let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); + let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); @@ -1594,9 +1646,9 @@ fn first_message_on_recv_ordering() { fn test_monitor_update_fail_claim() { // Basic test for monitor update failures when processing claim_funds calls. // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor - // update to claim the payment. We then send a payment C->B->A, making the forward of this - // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor - // updating and claim the payment on B. + // update to claim the payment. We then send two payments C->B->A, which are held at B. + // Finally, we restore the channel monitor updating and claim the payment on B, forwarding + // the payments from C onwards to A. 
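+	// The behavior under test: while B's monitor update remains outstanding, B should hold the
+	// HTLCs from C rather than failing them back, then forward them on to A in a single batched
+	// commitment update once the monitor update completes.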
let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); @@ -1612,12 +1664,19 @@ fn test_monitor_update_fail_claim() { *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[1].node.claim_funds(payment_preimage_1)); + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); + // Note that at this point there is a pending commitment transaction update for A being held by + // B. Even when we go to send the payment from C through B to A, B will not update this + // already-signed commitment transaction and will instead wait for it to resolve before + // forwarding the payment onwards. + let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]); + let route; { let net_graph_msg_handler = &nodes[2].net_graph_msg_handler; - let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[2], 1); } @@ -1632,29 +1691,19 @@ fn test_monitor_update_fail_claim() { nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); - nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true); - - let msg_events = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - match msg_events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id); - assert_eq!(msg.contents.flags & 2, 2); // temp disabled - }, - _ => panic!("Unexpected event"), - } + let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); + nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap(); + check_added_monitors!(nodes[2], 1); - let events = nodes[2].node.get_and_clear_pending_events(); + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. 
} = events[0] { - assert_eq!(payment_hash, payment_hash_2); - assert!(!rejected_by_dest); - } else { panic!("Unexpected event!"); } + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0); + commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); // Now restore monitor updating on the 0<->1 channel and claim the funds on B. let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); @@ -1664,12 +1713,47 @@ fn test_monitor_update_fail_claim() { let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); + expect_payment_sent!(nodes[0], payment_preimage_1); + + // Get the payment forwards, note that they were batched into one commitment update. + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]); + commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[0]); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let Event::PaymentSent { payment_preimage, .. } = events[0] { - assert_eq!(payment_preimage, payment_preimage_1); - } else { panic!("Unexpected event!"); } + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + assert_eq!(payment_hash_2, *payment_hash); + assert_eq!(1_000_000, amt); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_2, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentReceived { ref payment_hash, ref purpose, amt } => { + assert_eq!(payment_hash_3, *payment_hash); + assert_eq!(1_000_000, amt); + match &purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(payment_secret_3, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } } #[test] @@ -1967,3 +2051,202 @@ fn test_path_paused_mpp() { claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage); } + +fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { + // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we + // properly free them on reconnect. We previously failed such HTLCs upon serialization, but + // that behavior was both somewhat unexpected and also broken (there was a debug assertion + // which failed in such a case). 
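+	// We run this in three configurations (see channel_holding_cell_serialize below): reloading
+	// node A from its serialized state, merely disconnecting the peers, or doing neither, as the
+	// held HTLCs must be freed correctly in all three cases.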
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let persister: test_utils::TestPersister; + let new_chain_monitor: test_utils::TestChainMonitor; + let nodes_0_deserialized: ChannelManager; + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2; + let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]); + let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]); + + // Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed + // set but AwaitingRemoteRevoke unset. When this test was written, any attempts to send an HTLC + // while MonitorUpdateFailed is set are immediately failed-backwards. Thus, the only way to get + // an AddHTLC into the holding cell is to add it while AwaitingRemoteRevoke is set but + // MonitorUpdateFailed is unset, and then swap the flags. + // + // We do this by: + // a) routing a payment from node B to node A, + // b) sending a payment from node A to node B without delivering any of the generated messages, + // putting node A in AwaitingRemoteRevoke, + // c) sending a second payment from node A to node B, which is immediately placed in the + // holding cell, + // d) claiming the first payment from B, allowing us to fail the monitor update which occurs + // when we try to persist the payment preimage, + // e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message, + // clearing AwaitingRemoteRevoke on node A. + // + // Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c) + // will not be freed from the holding cell. + let (payment_preimage_0, _, _) = route_payment(&nodes[1], &[&nodes[0]], 100000); + + let route = { + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap() + }; + + nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap(); + check_added_monitors!(nodes[0], 1); + let send = SendEvent::from_node(&nodes[0]); + assert_eq!(send.msgs.len(), 1); + + nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); + check_added_monitors!(nodes[0], 0); + + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + assert!(nodes[0].node.claim_funds(payment_preimage_0)); + check_added_monitors!(nodes[0], 1); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg); + check_added_monitors!(nodes[1], 1); + + let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa); + check_added_monitors!(nodes[0], 1); + + if disconnect { + // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just + // disconnect the peers. 
Note that the fuzzer originally found this issue because
+		// deserializing a ChannelManager in this state causes an assertion failure.
+		if reload_a {
+			let nodes_0_serialized = nodes[0].node.encode();
+			let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+			nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
+			persister = test_utils::TestPersister::new();
+			let keys_manager = &chanmon_cfgs[0].keys_manager;
+			new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+			nodes[0].chain_monitor = &new_chain_monitor;
+			let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+			let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read(
+				&mut chan_0_monitor_read, keys_manager).unwrap();
+			assert!(chan_0_monitor_read.is_empty());
+
+			let mut nodes_0_read = &nodes_0_serialized[..];
+			let config = UserConfig::default();
+			nodes_0_deserialized = {
+				let mut channel_monitors = HashMap::new();
+				channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+				<(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+					default_config: config,
+					keys_manager,
+					fee_estimator: node_cfgs[0].fee_estimator,
+					chain_monitor: nodes[0].chain_monitor,
+					tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+					logger: nodes[0].logger,
+					channel_monitors,
+				}).unwrap().1
+			};
+			nodes[0].node = &nodes_0_deserialized;
+			assert!(nodes_0_read.is_empty());
+
+			nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+			check_added_monitors!(nodes[0], 1);
+		} else {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		}
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		// Now reconnect the two peers
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+		assert_eq!(reestablish_1.len(), 1);
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		assert_eq!(reestablish_2.len(), 1);
+
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+		let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		check_added_monitors!(nodes[1], 0);
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+		let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+
+		assert!(resp_0.0.is_none());
+		assert!(resp_0.1.is_none());
+		assert!(resp_0.2.is_none());
+		assert!(resp_1.0.is_none());
+		assert!(resp_1.1.is_none());
+
+		// Check that the freshly-generated cs is equal to the original (which we will deliver in a
+		// moment).
+		if let Some(pending_cs) = resp_1.2 {
+			assert!(pending_cs.update_add_htlcs.is_empty());
+			assert!(pending_cs.update_fail_htlcs.is_empty());
+			assert!(pending_cs.update_fulfill_htlcs.is_empty());
+			assert_eq!(pending_cs.commitment_signed, cs);
+		} else { panic!(); }
+
+		// There should be no monitor updates as we are still waiting on the earlier, failed one to
+		// complete.
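+		// (The outstanding update is node A's preimage-persisting update from the claim_funds
+		// call above, which we failed with a TemporaryFailure.)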
+ check_added_monitors!(nodes[0], 0); + check_added_monitors!(nodes[1], 0); + } + + // If we finish updating the monitor, we should free the holding cell right away (this did + // not occur prior to #756). + *nodes[0].chain_monitor.update_ret.lock().unwrap() = None; + let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&funding_txo, mon_id); + + // New outbound messages should be generated immediately upon a call to + // get_and_clear_pending_msg_events (but not before). + check_added_monitors!(nodes[0], 0); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + check_added_monitors!(nodes[0], 1); + assert_eq!(events.len(), 1); + + // Deliver the pending in-flight CS + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs); + check_added_monitors!(nodes[0], 1); + + let commitment_msg = match events.pop().unwrap() { + MessageSendEvent::UpdateHTLCs { node_id, updates } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + expect_payment_sent!(nodes[1], payment_preimage_0); + assert_eq!(updates.update_add_htlcs.len(), 1); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + updates.commitment_signed + }, + _ => panic!("Unexpected event type!"), + }; + + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg); + check_added_monitors!(nodes[1], 1); + + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000); + check_added_monitors!(nodes[1], 1); + + commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000); + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); +} +#[test] +fn channel_holding_cell_serialize() { + do_channel_holding_cell_serialize(true, true); + do_channel_holding_cell_serialize(true, false); + do_channel_holding_cell_serialize(false, true); // last arg doesn't matter +}