Handle monitor update failures during funding on the funder side
[rust-lightning] / src / ln / chanmon_update_fail_tests.rs
index 5ff531cb9dcce32575ed7391ee70262341830d0f..4803b0054e5d466bc6aaea5c4f43489681bac9f9 100644 (file)
@@ -6,7 +6,7 @@
 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
 use ln::channelmonitor::ChannelMonitorUpdateErr;
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LocalFeatures};
+use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
 
@@ -1553,3 +1553,92 @@ fn monitor_update_claim_fail_no_response() {
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }
+
+// Note that restore_between_fails without fail_on_generate is useless (there is no failed update to restore)
+// Also note that !fail_on_generate && !fail_on_signed is useless (at least one monitor update must fail)
+// Finally, note that !fail_on_signed is only possible with fail_on_generate && restore_between_fails
+fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool) {
+       // Test that if the ChannelMonitor update generated by funding_transaction_generated (and/or
+       // the one on funding_signed) fails, channel setup completes happily once the update is restored.
+       let mut nodes = create_network(2, &[None, None]);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43).unwrap();
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())).unwrap();
+
+       let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
+
+       if fail_on_generate {
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); // fail the initial monitor persistence
+       }
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       check_added_monitors!(nodes[0], 1); // a monitor is added whether or not the update call failed
+
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
+       check_added_monitors!(nodes[1], 1);
+
+       if restore_between_fails {
+               assert!(fail_on_generate); // restoring without a prior failure is meaningless
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+               nodes[0].node.test_restore_channel_monitor(); // complete the previously-failed update
+               check_added_monitors!(nodes[0], 1);
+               assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       }
+
+       if fail_on_signed {
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       } else {
+               assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
+               assert!(fail_on_generate); // Somebody has to fail
+       }
+       let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+       if fail_on_signed || !restore_between_fails {
+               if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() {
+                       if fail_on_generate && !restore_between_fails {
+                               assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
+                               check_added_monitors!(nodes[0], 0); // no new monitor update was attempted
+                       } else {
+                               assert_eq!(err, "Failed to update ChannelMonitor");
+                               check_added_monitors!(nodes[0], 1);
+                       }
+               } else { panic!(); }
+
+               assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); // no FundingBroadcastSafe yet
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+               nodes[0].node.test_restore_channel_monitor(); // retry now that persistence will succeed
+       } else {
+               funding_signed_res.unwrap();
+       }
+
+       check_added_monitors!(nodes[0], 1);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
+                       assert_eq!(user_channel_id, 43); // matches the user_id passed to create_channel above
+                       assert_eq!(*funding_txo, funding_output);
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
+       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+       for node in nodes.iter() {
+               assert!(node.router.handle_channel_announcement(&announcement).unwrap());
+               node.router.handle_channel_update(&as_update).unwrap();
+               node.router.handle_channel_update(&bs_update).unwrap();
+       }
+
+       send_payment(&nodes[0], &[&nodes[1]], 8000000);
+       close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+}
+
+#[test]
+fn during_funding_monitor_fail() {
+       do_during_funding_monitor_fail(false, false, true); // fail only on funding_signed
+       do_during_funding_monitor_fail(true, false, true); // fail at generation, fail again at funding_signed without restoring
+       do_during_funding_monitor_fail(true, true, true); // fail at generation, restore, then fail again at funding_signed
+       do_during_funding_monitor_fail(true, true, false); // fail at generation, restore, succeed at funding_signed
+}