Make the action field of LightningError mandatory
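
HandleError is renamed to LightningError and its ErrorAction is no longer wrapped in an Option, so every call site that previously matched `action: Some(msgs::ErrorAction::IgnoreError)` now matches `action: msgs::ErrorAction::IgnoreError`. The struct change itself lives in src/ln/msgs.rs and is not part of this hunk; a minimal before/after sketch, assuming the err field keeps its existing &'static str type:

    // Before: an error did not have to say how the peer should react.
    pub struct HandleError {
        pub err: &'static str,
        pub action: Option<ErrorAction>,
    }

    // After: renamed, and every error must carry an explicit action.
    pub struct LightningError {
        pub err: &'static str,
        pub action: ErrorAction,
    }

Besides the error-type changes, the hunks below also pick up the updated test helper signatures visible in this diff (create_network's per-node config slice, the LocalFeatures arguments to create_announced_chan_between_nodes, and fail_htlc_backwards without its second argument).
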
diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs
index c915307385f4731f02852e035ca5bbfc68b01013..4b8490c5b227b8c7b506ae2cc699135ffbe23f46 100644
--- a/src/ln/chanmon_update_fail_tests.rs
+++ b/src/ln/chanmon_update_fail_tests.rs
@@ -6,22 +6,20 @@
 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
 use ln::channelmonitor::ChannelMonitorUpdateErr;
 use ln::msgs;
-use ln::msgs::ChannelMessageHandler;
+use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
 
 use bitcoin_hashes::sha256::Hash as Sha256;
 use bitcoin_hashes::Hash;
 
-use std::time::Instant;
-
 use ln::functional_test_utils::*;
 
 #[test]
 fn test_simple_monitor_permanent_update_fail() {
        // Test that we handle a simple permanent monitor update failure
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -50,8 +48,8 @@ fn test_simple_monitor_permanent_update_fail() {
 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        // Test that we can recover from a simple temporary monitor update failure optionally with
        // a disconnect in between
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -149,8 +147,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // * We then walk through more message exchanges to get the original update_add_htlc
        //   through, swapping message ordering based on disconnect_count & 8 and optionally
        //   disconnect/reconnecting based on disconnect_count.
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
@@ -192,7 +190,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                                        _ => panic!("Unexpected event"),
                                }
 
-                               if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+                               if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
                                        assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
                                } else { panic!(); }
                        }
@@ -475,8 +473,8 @@ fn test_monitor_temporary_update_fail_c() {
 #[test]
 fn test_monitor_update_fail_cs() {
        // Tests handling of a monitor update failure when processing an incoming commitment_signed
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -487,7 +485,7 @@ fn test_monitor_update_fail_cs() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        check_added_monitors!(nodes[1], 1);
@@ -517,7 +515,7 @@ fn test_monitor_update_fail_cs() {
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 
                        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-                       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
+                       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
                                assert_eq!(err, "Failed to update ChannelMonitor");
                        } else { panic!(); }
                        check_added_monitors!(nodes[0], 1);
@@ -554,8 +552,8 @@ fn test_monitor_update_fail_no_rebroadcast() {
        // Tests handling of a monitor update failure when no message rebroadcasting on
        // test_restore_channel_monitor() is required. Backported from
        // chanmon_fail_consistency fuzz tests.
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -567,7 +565,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -596,8 +594,8 @@ fn test_monitor_update_fail_no_rebroadcast() {
 fn test_monitor_update_raa_while_paused() {
        // Tests handling of an RAA while monitor updating has already been marked failed.
        // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        send_payment(&nodes[0], &[&nodes[1]], 5000000);
 
@@ -620,12 +618,12 @@ fn test_monitor_update_raa_while_paused() {
 
        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap();
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        check_added_monitors!(nodes[0], 1);
 
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
                assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
        } else { panic!(); }
        check_added_monitors!(nodes[0], 1);
@@ -663,9 +661,9 @@ fn test_monitor_update_raa_while_paused() {
 
 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        // Tests handling of a monitor update failure when processing an incoming RAA
-       let mut nodes = create_network(3);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
-       let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+       let mut nodes = create_network(3, &[None, None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+       let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
        // Rebalance a bit so that we can send backwards from 2 to 1.
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
@@ -674,7 +672,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 
        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
-       assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 0));
+       assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
 
@@ -706,7 +704,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Now fail monitor updating.
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@@ -770,7 +768,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
                send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
                nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
-               if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
+               if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
                        assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
                } else { panic!(); }
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -916,9 +914,9 @@ fn test_monitor_update_fail_reestablish() {
        // Simple test for message retransmission after monitor update failure on
        // channel_reestablish generating a monitor update (which comes from freeing holding cell
        // HTLCs).
-       let mut nodes = create_network(3);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
-       create_announced_chan_between_nodes(&nodes, 1, 2);
+       let mut nodes = create_network(3, &[None, None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+       create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
 
        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 
@@ -947,7 +945,7 @@ fn test_monitor_update_fail_reestablish() {
 
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
 
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        check_added_monitors!(nodes[1], 1);
@@ -994,8 +992,8 @@ fn raa_no_response_awaiting_raa_state() {
        // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
        // in question (assuming it intends to respond with a CS after monitor updating is restored).
        // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -1035,12 +1033,12 @@ fn raa_no_response_awaiting_raa_state() {
        // then restore channel monitor updates.
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        check_added_monitors!(nodes[1], 1);
 
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
                assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
        } else { panic!(); }
        check_added_monitors!(nodes[1], 1);
@@ -1107,8 +1105,8 @@ fn claim_while_disconnected_monitor_update_fail() {
        // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
        // code introduced a regression in this test (specifically, this caught a removal of the
        // channel_reestablish handling ensuring the order was sensical given the messages used).
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        // Forward a payment for B to claim
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -1132,7 +1130,7 @@ fn claim_while_disconnected_monitor_update_fail() {
        // update.
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        check_added_monitors!(nodes[1], 1);
@@ -1147,7 +1145,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
                assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
        } else { panic!(); }
        // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
@@ -1222,8 +1220,8 @@ fn monitor_failed_no_reestablish_response() {
        // response to a commitment_signed.
        // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
        // debug_assert!() failure in channel_reestablish handling.
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        // Route the payment and deliver the initial commitment_signed (with a monitor update failure
        // on receipt).
@@ -1237,7 +1235,7 @@ fn monitor_failed_no_reestablish_response() {
        assert_eq!(events.len(), 1);
        let payment_event = SendEvent::from_event(events.pop().unwrap());
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        check_added_monitors!(nodes[1], 1);
@@ -1288,8 +1286,8 @@ fn first_message_on_recv_ordering() {
        // have no pending response but will want to send a RAA/CS (with the updates for the second
        // payment applied).
        // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
-       let mut nodes = create_network(2);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        // Route the first payment outbound, holding the last RAA for B until we are set up so that we
        // can deliver it and fail the monitor update.
@@ -1329,7 +1327,7 @@ fn first_message_on_recv_ordering() {
        // Deliver the final RAA for the first payment, which does not require a response. RAAs
        // generally require a commitment_signed, so the fact that we're expecting an opposite response
        // to the next message also tests resetting the delivery order.
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
                assert_eq!(err, "Failed to update ChannelMonitor");
        } else { panic!(); }
        check_added_monitors!(nodes[1], 1);
@@ -1338,7 +1336,7 @@ fn first_message_on_recv_ordering() {
        // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
        // the appropriate HTLC acceptance).
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
-       if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+       if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
                assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
        } else { panic!(); }
 
@@ -1365,3 +1363,322 @@ fn first_message_on_recv_ordering() {
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }
+
+#[test]
+fn test_monitor_update_fail_claim() {
+       // Basic test for monitor update failures when processing claim_funds calls.
+       // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
+       // update to claim the payment. We then send a payment C->B->A, making the forward of this
+       // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
+       // updating and claim the payment on B.
+       let mut nodes = create_network(3, &[None, None, None]);
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+       create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+       // Rebalance a bit so that we can send backwards from 3 to 2.
+       send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+       let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       check_added_monitors!(nodes[1], 1);
+
+       let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+       let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+       nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+       check_added_monitors!(nodes[2], 1);
+
+       // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
+       // paused, so forward shouldn't succeed until we call test_restore_channel_monitor().
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+
+       let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+       commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+
+       let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+       nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
+       commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
+
+       let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match msg_events[0] {
+               MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+                       assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+                       assert_eq!(msg.contents.flags & 2, 2); // temp disabled
+               },
+               _ => panic!("Unexpected event"),
+       }
+
+       let events = nodes[2].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+               assert_eq!(payment_hash, payment_hash_2);
+               assert!(!rejected_by_dest);
+       } else { panic!("Unexpected event!"); }
+
+       // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
+       nodes[1].node.test_restore_channel_monitor();
+       check_added_monitors!(nodes[1], 1);
+
+       let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
+       commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+               assert_eq!(payment_preimage, payment_preimage_1);
+       } else { panic!("Unexpected event!"); }
+}
+
+#[test]
+fn test_monitor_update_on_pending_forwards() {
+       // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
+       // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
+       // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
+       // from C to A will be pending a forward to A.
+       let mut nodes = create_network(3, &[None, None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+       create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+       // Rebalance a bit so that we can send backwards from 3 to 1.
+       send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+       let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+       assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
+       expect_pending_htlcs_forwardable!(nodes[2]);
+       check_added_monitors!(nodes[2], 1);
+
+       let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
+       commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+       let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+       nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+       check_added_monitors!(nodes[2], 1);
+
+       let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+       commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
+
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+       nodes[1].node.test_restore_channel_monitor();
+       check_added_monitors!(nodes[1], 1);
+
+       let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+       nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
+       commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+               assert_eq!(payment_hash, payment_hash_1);
+               assert!(rejected_by_dest);
+       } else { panic!("Unexpected event!"); }
+       match events[1] {
+               Event::PendingHTLCsForwardable { .. } => { },
+               _ => panic!("Unexpected event"),
+       };
+       nodes[0].node.process_pending_htlc_forwards();
+       expect_payment_received!(nodes[0], payment_hash_2, 1000000);
+
+       claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+}
+
+#[test]
+fn monitor_update_claim_fail_no_response() {
+       // Test for claim_funds resulting in both a monitor update failure and no message response (due
+       // to channel being AwaitingRAA).
+       // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+       // code was broken.
+       let mut nodes = create_network(2, &[None, None]);
+       create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+       // Forward a payment for B to claim
+       let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+       // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
+       let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+       let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+       nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+       check_added_monitors!(nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+       let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
+
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+       nodes[1].node.test_restore_channel_monitor();
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+       check_added_monitors!(nodes[1], 1);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+       let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+       commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::PaymentSent { ref payment_preimage } => {
+                       assert_eq!(*payment_preimage, payment_preimage_1);
+               },
+               _ => panic!("Unexpected event"),
+       }
+
+       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+// Note that restore_between_fails with !fail_on_generate is useless
+// Also note that !fail_on_generate && !fail_on_signed is useless
+// Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
+// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
+// restore_b_before_conf has no meaning if !confirm_a_first
+fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
+       // Test that if the monitor update generated by funding_transaction_generated fails we continue
+       // the channel setup happily after the update is restored.
+       let mut nodes = create_network(2, &[None, None]);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43).unwrap();
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())).unwrap();
+
+       let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
+
+       if fail_on_generate {
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       }
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       check_added_monitors!(nodes[0], 1);
+
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
+       check_added_monitors!(nodes[1], 1);
+
+       if restore_between_fails {
+               assert!(fail_on_generate);
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+               nodes[0].node.test_restore_channel_monitor();
+               check_added_monitors!(nodes[0], 1);
+               assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       }
+
+       if fail_on_signed {
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       } else {
+               assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
+               assert!(fail_on_generate); // Somebody has to fail
+       }
+       let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+       if fail_on_signed || !restore_between_fails {
+               if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() {
+                       if fail_on_generate && !restore_between_fails {
+                               assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
+                               check_added_monitors!(nodes[0], 0);
+                       } else {
+                               assert_eq!(err, "Failed to update ChannelMonitor");
+                               check_added_monitors!(nodes[0], 1);
+                       }
+               } else { panic!(); }
+
+               assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+               nodes[0].node.test_restore_channel_monitor();
+       } else {
+               funding_signed_res.unwrap();
+       }
+
+       check_added_monitors!(nodes[0], 1);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
+                       assert_eq!(user_channel_id, 43);
+                       assert_eq!(*funding_txo, funding_output);
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       if confirm_a_first {
+               confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+               nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())).unwrap();
+       } else {
+               assert!(!restore_b_before_conf);
+               confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       }
+
+       // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       if !restore_b_before_conf {
+               confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+       }
+
+       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+       nodes[1].node.test_restore_channel_monitor();
+       check_added_monitors!(nodes[1], 1);
+
+       let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
+               nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())).unwrap();
+
+               confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+               let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
+               (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
+       } else {
+               if restore_b_before_conf {
+                       confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+               }
+               let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+               (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
+       };
+       for node in nodes.iter() {
+               assert!(node.router.handle_channel_announcement(&announcement).unwrap());
+               node.router.handle_channel_update(&as_update).unwrap();
+               node.router.handle_channel_update(&bs_update).unwrap();
+       }
+
+       send_payment(&nodes[0], &[&nodes[1]], 8000000);
+       close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+}
+
+#[test]
+fn during_funding_monitor_fail() {
+       do_during_funding_monitor_fail(false, false, true, true, true);
+       do_during_funding_monitor_fail(true, false, true, false, false);
+       do_during_funding_monitor_fail(true, true, true, true, false);
+       do_during_funding_monitor_fail(true, true, false, false, false);
+}