X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchanmon_update_fail_tests.rs;h=c1fe6fbdd6d7c54002d31f277481a200f5cb5b7b;hb=63c1fa6db52723e54b001e816d7c15b4478d9f73;hp=fc481f8aac7bb8e5b3326e12e5f8d5dc83ddb895;hpb=a138a9af010d5c6e80caaae56a3116051f3df653;p=rust-lightning

diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs
index fc481f8a..c1fe6fbd 100644
--- a/src/ln/chanmon_update_fail_tests.rs
+++ b/src/ln/chanmon_update_fail_tests.rs
@@ -6,22 +6,20 @@
 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
 use ln::channelmonitor::ChannelMonitorUpdateErr;
 use ln::msgs;
-use ln::msgs::ChannelMessageHandler;
+use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
 
 use bitcoin_hashes::sha256::Hash as Sha256;
 use bitcoin_hashes::Hash;
 
-use std::time::Instant;
-
 use ln::functional_test_utils::*;
 
 #[test]
 fn test_simple_monitor_permanent_update_fail() {
 	// Test that we handle a simple permanent monitor update failure
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 	let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -50,8 +48,8 @@ fn test_simple_monitor_permanent_update_fail() {
 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	// Test that we can recover from a simple temporary monitor update failure optionally with
 	// a disconnect in between
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -93,7 +91,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 
 	// Now set it to failed again...
 	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
@@ -149,8 +147,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	// * We then walk through more message exchanges to get the original update_add_htlc
 	//   through, swapping message ordering based on disconnect_count & 8 and optionally
 	//   disconnect/reconnecting based on disconnect_count.
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
@@ -168,7 +166,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 
 	// Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
 	// but nodes[0] won't respond since it is frozen.
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_2.len(), 1);
@@ -192,7 +190,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		_ => panic!("Unexpected event"),
 	}
 
-	if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
 		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 	} else { panic!(); }
 }
@@ -442,7 +440,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -475,8 +473,8 @@ fn test_monitor_temporary_update_fail_c() {
 #[test]
 fn test_monitor_update_fail_cs() {
 	// Tests handling of a monitor update failure when processing an incoming commitment_signed
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 	let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -487,7 +485,7 @@ fn test_monitor_update_fail_cs() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -517,7 +515,7 @@ fn test_monitor_update_fail_cs() {
 		assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 
 	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[0], 1);
@@ -546,7 +544,7 @@ fn test_monitor_update_fail_cs() {
 		_ => panic!("Unexpected event"),
 	};
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
 }
 
 #[test]
@@ -554,8 +552,8 @@ fn test_monitor_update_fail_no_rebroadcast() {
 	// Tests handling of a monitor update failure when no message rebroadcasting on
 	// test_restore_channel_monitor() is required. Backported from
 	// chanmon_fail_consistency fuzz tests.
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 	let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -567,7 +565,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
 	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -589,17 +587,17 @@ fn test_monitor_update_fail_no_rebroadcast() {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 }
 
 #[test]
 fn test_monitor_update_raa_while_paused() {
 	// Tests handling of an RAA while monitor updating has already been marked failed.
 	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
-	send_payment(&nodes[0], &[&nodes[1]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
 
 	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 	let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -620,12 +618,12 @@ fn test_monitor_update_raa_while_paused() {
 	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[0], 1);
 
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
 	} else { panic!(); }
 	check_added_monitors!(nodes[0], 1);
 
@@ -657,24 +655,24 @@ fn test_monitor_update_raa_while_paused() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
+	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2, 1_000_000);
 }
 
 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// Tests handling of a monitor update failure when processing an incoming RAA
-	let mut nodes = create_network(3);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
-	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+	let mut nodes = create_network(3, &[None, None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
 	// Rebalance a bit so that we can send backwards from 2 to 1.
-	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 
 	// Route a first payment that we'll fail backwards
 	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 
 	// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
-	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 0));
+	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 
@@ -706,7 +704,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// Now fail monitor updating.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
@@ -770,7 +768,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
 		nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
-		if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
+		if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
 			assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 		} else { panic!(); }
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -899,10 +897,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 			Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
 			_ => panic!("Unexpected event"),
 		};
-		claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
+		claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap(), 1_000_000);
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -916,16 +914,16 @@ fn test_monitor_update_fail_reestablish() {
 	// Simple test for message retransmission after monitor update failure on
 	// channel_reestablish generating a monitor update (which comes from freeing holding cell
 	// HTLCs).
-	let mut nodes = create_network(3);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
-	create_announced_chan_between_nodes(&nodes, 1, 2);
+	let mut nodes = create_network(3, &[None, None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+	create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
 	let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-	assert!(nodes[2].node.claim_funds(our_payment_preimage));
+	assert!(nodes[2].node.claim_funds(our_payment_preimage, 1_000_000));
 	check_added_monitors!(nodes[2], 1);
 	let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
@@ -947,7 +945,7 @@ fn test_monitor_update_fail_reestablish() {
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
 
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
 
@@ -994,8 +992,8 @@ fn raa_no_response_awaiting_raa_state() {
 	// due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
 	// in question (assuming it intends to respond with a CS after monitor updating is restored).
 	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -1035,12 +1033,12 @@ fn raa_no_response_awaiting_raa_state() {
 	// then restore channel monitor updates.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
 
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1094,9 +1092,9 @@ fn raa_no_response_awaiting_raa_state() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_3, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 1_000_000);
 }
 
 #[test]
@@ -1107,8 +1105,8 @@ fn claim_while_disconnected_monitor_update_fail() {
 	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
 	// code introduced a regression in this test (specifically, this caught a removal of the
 	// channel_reestablish handling ensuring the order was sensical given the messages used).
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	// Forward a payment for B to claim
 	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -1116,7 +1114,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
@@ -1132,7 +1130,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 	// update.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
 
@@ -1147,7 +1145,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 	let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 	} else { panic!(); }
 
 	// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
@@ -1213,7 +1211,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -1222,8 +1220,8 @@ fn monitor_failed_no_reestablish_response() {
 	// response to a commitment_signed.
 	// Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
 	// debug_assert!() failure in channel_reestablish handling.
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	// Route the payment and deliver the initial commitment_signed (with a monitor update failure
 	// on receipt).
@@ -1237,7 +1235,7 @@ fn monitor_failed_no_reestablish_response() {
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1273,7 +1271,7 @@ fn monitor_failed_no_reestablish_response() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_1, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 }
 
 #[test]
@@ -1288,8 +1286,8 @@ fn first_message_on_recv_ordering() {
 	// have no pending response but will want to send a RAA/CS (with the updates for the second
 	// payment applied).
 	// Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	// Route the first payment outbound, holding the last RAA for B until we are set up so that we
 	// can deliver it and fail the monitor update.
@@ -1329,7 +1327,7 @@ fn first_message_on_recv_ordering() {
 	// Deliver the final RAA for the first payment, which does not require a response. RAAs
 	// generally require a commitment_signed, so the fact that we're expecting an opposite response
 	// to the next message also tests resetting the delivery order.
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1338,7 +1336,7 @@ fn first_message_on_recv_ordering() {
 	// RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
 	// the appropriate HTLC acceptance).
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 	} else { panic!(); }
 
@@ -1362,8 +1360,8 @@ fn first_message_on_recv_ordering() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_2, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -1373,17 +1371,17 @@ fn test_monitor_update_fail_claim() {
 	// update to claim the payment. We then send a payment C->B->A, making the forward of this
 	// payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
 	// updating and claim the payment on B.
-	let mut nodes = create_network(3);
-	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
-	create_announced_chan_between_nodes(&nodes, 1, 2);
+	let mut nodes = create_network(3, &[None, None, None]);
+	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+	create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
 	// Rebalance a bit so that we can send backwards from 3 to 2.
-	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 
 	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 
 	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
@@ -1443,15 +1441,15 @@ fn test_monitor_update_on_pending_forwards() {
 	// We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
 	// The payment from A to C will be failed by C and pending a back-fail to A, while the payment
 	// from C to A will be pending a forward to A.
-	let mut nodes = create_network(3);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
-	create_announced_chan_between_nodes(&nodes, 1, 2);
+	let mut nodes = create_network(3, &[None, None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+	create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
 	// Rebalance a bit so that we can send backwards from 3 to 1.
-	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 
 	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
-	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 1000000));
+	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 
@@ -1495,11 +1493,10 @@ fn test_monitor_update_on_pending_forwards() {
 		Event::PendingHTLCsForwardable { .. } => { },
 		_ => panic!("Unexpected event"),
 	};
-	nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
 	nodes[0].node.process_pending_htlc_forwards();
 	expect_payment_received!(nodes[0], payment_hash_2, 1000000);
 
-	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -1508,8 +1505,8 @@ fn monitor_update_claim_fail_no_response() {
 	// to channel being AwaitingRAA).
 	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
 	// code was broken.
-	let mut nodes = create_network(2);
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let mut nodes = create_network(2, &[None, None]);
+	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
 	// Forward a payment for B to claim
 	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -1527,7 +1524,7 @@ fn monitor_update_claim_fail_no_response() {
 	let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -1554,5 +1551,134 @@ fn monitor_update_claim_fail_no_response() {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
+}
+
+// Note that restore_between_fails with !fail_on_generate is useless
+// Also note that !fail_on_generate && !fail_on_signed is useless
+// Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
+// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
+// restore_b_before_conf has no meaning if !confirm_a_first
+fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
+	// Test that if the monitor update generated by funding_transaction_generated fails we continue
+	// the channel setup happily after the update is restored.
+	let mut nodes = create_network(2, &[None, None]);
+
+	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43).unwrap();
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
+	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())).unwrap();
+
+	let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
+
+	if fail_on_generate {
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	}
+	nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+	check_added_monitors!(nodes[0], 1);
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
+	check_added_monitors!(nodes[1], 1);
+
+	if restore_between_fails {
+		assert!(fail_on_generate);
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[0].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[0], 1);
+		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	if fail_on_signed {
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	} else {
+		assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
+		assert!(fail_on_generate); // Somebody has to fail
+	}
+	let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+	if fail_on_signed || !restore_between_fails {
+		if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() {
+			if fail_on_generate && !restore_between_fails {
+				assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
+				check_added_monitors!(nodes[0], 0);
+			} else {
+				assert_eq!(err, "Failed to update ChannelMonitor");
+				check_added_monitors!(nodes[0], 1);
+			}
+		} else { panic!(); }
+
+		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[0].node.test_restore_channel_monitor();
+	} else {
+		funding_signed_res.unwrap();
+	}
+
+	check_added_monitors!(nodes[0], 1);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
+			assert_eq!(user_channel_id, 43);
+			assert_eq!(*funding_txo, funding_output);
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	if confirm_a_first {
+		confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+		nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())).unwrap();
+	} else {
+		assert!(!restore_b_before_conf);
+		confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	// Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	if !restore_b_before_conf {
+		confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+	}
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
+		nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())).unwrap();
+
+		confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+		let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
+		(channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
+	} else {
+		if restore_b_before_conf {
+			confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+		}
+		let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+		(channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
+	};
+	for node in nodes.iter() {
+		assert!(node.router.handle_channel_announcement(&announcement).unwrap());
+		node.router.handle_channel_update(&as_update).unwrap();
+		node.router.handle_channel_update(&bs_update).unwrap();
+	}
+
+	send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
+	close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+}
+
+#[test]
+fn during_funding_monitor_fail() {
+	do_during_funding_monitor_fail(false, false, true, true, true);
+	do_during_funding_monitor_fail(true, false, true, false, false);
+	do_during_funding_monitor_fail(true, true, true, true, false);
+	do_during_funding_monitor_fail(true, true, false, false, false);
 }