X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchanmon_update_fail_tests.rs;h=c1fe6fbdd6d7c54002d31f277481a200f5cb5b7b;hb=63c1fa6db52723e54b001e816d7c15b4478d9f73;hp=4803b0054e5d466bc6aaea5c4f43489681bac9f9;hpb=a1e0ca410ec29db14f79c5319a66da2b4f428982;p=rust-lightning

diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs
index 4803b005..c1fe6fbd 100644
--- a/src/ln/chanmon_update_fail_tests.rs
+++ b/src/ln/chanmon_update_fail_tests.rs
@@ -91,7 +91,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 
 	// Now set it to failed again...
 	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
@@ -166,7 +166,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 
 	// Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
 	// but nodes[0] won't respond since it is frozen.
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_2.len(), 1);
@@ -190,7 +190,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 			_ => panic!("Unexpected event"),
 		}
 
-		if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+		if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
 			assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 		} else { panic!(); }
 	}
@@ -440,7 +440,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -485,7 +485,7 @@ fn test_monitor_update_fail_cs() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -515,7 +515,7 @@ fn test_monitor_update_fail_cs() {
 			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 
 			*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-			if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
+			if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
 				assert_eq!(err, "Failed to update ChannelMonitor");
 			} else { panic!(); }
 			check_added_monitors!(nodes[0], 1);
@@ -544,7 +544,7 @@ fn test_monitor_update_fail_cs() {
 		_ => panic!("Unexpected event"),
 	};
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage, 1_000_000);
 }
 
 #[test]
@@ -565,7 +565,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
 	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -587,7 +587,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 }
 
 #[test]
@@ -597,7 +597,7 @@ fn test_monitor_update_raa_while_paused() {
 	let mut nodes = create_network(2, &[None, None]);
 	create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
-	send_payment(&nodes[0], &[&nodes[1]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
 
 	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
 	let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -618,12 +618,12 @@ fn test_monitor_update_raa_while_paused() {
 
 	*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[0], 1);
 
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
 	} else { panic!(); }
 	check_added_monitors!(nodes[0], 1);
@@ -655,8 +655,8 @@ fn test_monitor_update_raa_while_paused() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
+	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2, 1_000_000);
 }
 
 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
@@ -666,7 +666,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
 	// Rebalance a bit so that we can send backwards from 2 to 1.
-	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 
 	// Route a first payment that we'll fail backwards
 	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
@@ -704,7 +704,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
 	// Now fail monitor updating.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@@ -768,7 +768,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
 		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
 		nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
-		if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
+		if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
 			assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 		} else { panic!(); }
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -897,10 +897,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 			Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
 			_ => panic!("Unexpected event"),
 		};
-		claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
+		claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap(), 1_000_000);
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -923,7 +923,7 @@ fn test_monitor_update_fail_reestablish() {
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-	assert!(nodes[2].node.claim_funds(our_payment_preimage));
+	assert!(nodes[2].node.claim_funds(our_payment_preimage, 1_000_000));
 	check_added_monitors!(nodes[2], 1);
 	let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
@@ -945,7 +945,7 @@ fn test_monitor_update_fail_reestablish() {
 
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
 
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1033,12 +1033,12 @@ fn raa_no_response_awaiting_raa_state() {
 	// then restore channel monitor updates.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
 
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1092,9 +1092,9 @@ fn raa_no_response_awaiting_raa_state() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_3, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3, 1_000_000);
 }
 
 #[test]
@@ -1114,7 +1114,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
@@ -1130,7 +1130,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
 	// update.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1145,7 +1145,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
 	let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 	} else { panic!(); }
 	// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
@@ -1211,7 +1211,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -1235,7 +1235,7 @@ fn monitor_failed_no_reestablish_response() {
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1271,7 +1271,7 @@ fn monitor_failed_no_reestablish_response() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_1, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
 }
 
 #[test]
@@ -1327,7 +1327,7 @@ fn first_message_on_recv_ordering() {
 	// Deliver the final RAA for the first payment, which does not require a response. RAAs
 	// generally require a commitment_signed, so the fact that we're expecting an opposite response
 	// to the next message also tests resetting the delivery order.
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
 		assert_eq!(err, "Failed to update ChannelMonitor");
 	} else { panic!(); }
 	check_added_monitors!(nodes[1], 1);
@@ -1336,7 +1336,7 @@ fn first_message_on_recv_ordering() {
 	// RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
 	// the appropriate HTLC acceptance).
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-	if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
 		assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
 	} else { panic!(); }
 
@@ -1360,8 +1360,8 @@ fn first_message_on_recv_ordering() {
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_received!(nodes[1], payment_hash_2, 1000000);
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 1_000_000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -1376,12 +1376,12 @@ fn test_monitor_update_fail_claim() {
 	create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
 	// Rebalance a bit so that we can send backwards from 3 to 2.
-	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 
 	let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 
 	let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
@@ -1446,7 +1446,7 @@ fn test_monitor_update_on_pending_forwards() {
 	create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
 	// Rebalance a bit so that we can send backwards from 3 to 1.
-	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000, 5_000_000);
 
 	let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
@@ -1496,7 +1496,7 @@ fn test_monitor_update_on_pending_forwards() {
 	nodes[0].node.process_pending_htlc_forwards();
 	expect_payment_received!(nodes[0], payment_hash_2, 1000000);
 
-	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+	claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2, 1_000_000);
 }
 
 #[test]
@@ -1524,7 +1524,7 @@ fn monitor_update_claim_fail_no_response() {
 	let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	assert!(nodes[1].node.claim_funds(payment_preimage_1, 1_000_000));
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -1551,13 +1551,15 @@ fn monitor_update_claim_fail_no_response() {
 		_ => panic!("Unexpected event"),
 	}
 
-	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
 
 // Note that restore_between_fails with !fail_on_generate is useless
 // Also note that !fail_on_generate && !fail_on_signed is useless
 // Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
-fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool) {
+// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
+// restore_b_before_conf has no meaning if !confirm_a_first
+fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
 	// Test that if the monitor update generated by funding_transaction_generated fails we continue
 	// the channel setup happily after the update is restored.
 	let mut nodes = create_network(2, &[None, None]);
@@ -1574,6 +1576,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 	nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
 	check_added_monitors!(nodes[0], 1);
 
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
 	check_added_monitors!(nodes[1], 1);
 
@@ -1594,7 +1597,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 	}
 	let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
 	if fail_on_signed || !restore_between_fails {
-		if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() {
+		if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() {
 			if fail_on_generate && !restore_between_fails {
 				assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
 				check_added_monitors!(nodes[0], 0);
@@ -1623,22 +1626,59 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
 		_ => panic!("Unexpected event"),
 	};
 
-	let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
-	let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+	if confirm_a_first {
+		confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+		nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())).unwrap();
+	} else {
+		assert!(!restore_b_before_conf);
+		confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	// Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	if !restore_b_before_conf {
+		confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+	}
+
+	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+	nodes[1].node.test_restore_channel_monitor();
+	check_added_monitors!(nodes[1], 1);
+
+	let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
+		nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())).unwrap();
+
+		confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+		let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
+		(channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
+	} else {
+		if restore_b_before_conf {
+			confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+		}
+		let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+		(channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
+	};
 	for node in nodes.iter() {
 		assert!(node.router.handle_channel_announcement(&announcement).unwrap());
 		node.router.handle_channel_update(&as_update).unwrap();
 		node.router.handle_channel_update(&bs_update).unwrap();
 	}
 
-	send_payment(&nodes[0], &[&nodes[1]], 8000000);
+	send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
 	close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
 }
 
 #[test]
 fn during_funding_monitor_fail() {
-	do_during_funding_monitor_fail(false, false, true);
-	do_during_funding_monitor_fail(true, false, true);
-	do_during_funding_monitor_fail(true, true, true);
-	do_during_funding_monitor_fail(true, true, false);
+	do_during_funding_monitor_fail(false, false, true, true, true);
+	do_during_funding_monitor_fail(true, false, true, false, false);
+	do_during_funding_monitor_fail(true, true, true, true, false);
+	do_during_funding_monitor_fail(true, true, false, false, false);
}
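
Every test in this diff drives the same cycle: force *chan_monitor.update_ret to Err(ChannelMonitorUpdateErr::TemporaryFailure), observe that the next message handler returns msgs::LightningError with ErrorAction::IgnoreError ("Failed to update ChannelMonitor") while the channel freezes, then set update_ret back to Ok(()) and call test_restore_channel_monitor() to resume. The sketch below models that fail-then-restore discipline in isolation; it is not rust-lightning code, and MockMonitor, apply_update, and restore are hypothetical names used purely for illustration.

// Standalone sketch (not rust-lightning): a mock monitor whose update result
// can be flipped to a simulated TemporaryFailure and later restored, mirroring
// the update_ret / test_restore_channel_monitor pattern in the tests above.
use std::sync::Mutex;

#[derive(Debug, Clone, Copy, PartialEq)]
enum ChannelMonitorUpdateErr {
	// The persistence layer failed transiently; the channel must be frozen
	// (no new commitment updates) until the monitor write is retried.
	TemporaryFailure,
}

struct MockMonitor {
	// Tests flip this to Err(TemporaryFailure) to simulate a failed write,
	// like `*chan_monitor.update_ret.lock().unwrap() = Err(..)` in the diff.
	update_ret: Mutex<Result<(), ChannelMonitorUpdateErr>>,
	// Updates that arrived during the failure window, awaiting replay.
	pending_updates: Mutex<u32>,
}

impl MockMonitor {
	fn new() -> Self {
		MockMonitor { update_ret: Mutex::new(Ok(())), pending_updates: Mutex::new(0) }
	}

	// Apply one monitor update; on simulated failure the update is counted
	// as pending so a later restore can replay it rather than drop it.
	fn apply_update(&self) -> Result<(), ChannelMonitorUpdateErr> {
		let ret = *self.update_ret.lock().unwrap();
		if ret.is_err() {
			*self.pending_updates.lock().unwrap() += 1;
		}
		ret
	}

	// Analogous in spirit to test_restore_channel_monitor(): clear the
	// failure and report how many frozen updates can now move forward.
	fn restore(&self) -> u32 {
		*self.update_ret.lock().unwrap() = Ok(());
		std::mem::replace(&mut *self.pending_updates.lock().unwrap(), 0)
	}
}

fn main() {
	let mon = MockMonitor::new();
	assert_eq!(mon.apply_update(), Ok(()));

	// Open the failure window, as the tests do before delivering a
	// commitment_signed or revoke_and_ack.
	*mon.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	assert_eq!(mon.apply_update(), Err(ChannelMonitorUpdateErr::TemporaryFailure));

	// Restoring releases the one frozen update and normal operation resumes.
	assert_eq!(mon.restore(), 1);
	assert_eq!(mon.apply_update(), Ok(()));
}

The point the tests keep re-checking, and the sketch imitates, is that a TemporaryFailure must not lose the update: the channel stays frozen, nothing is re-sent or acted upon in the interim, and the held state is released exactly once when the monitor is restored.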