use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
use ln::channelmonitor::ChannelMonitorUpdateErr;
use ln::msgs;
-use ln::msgs::ChannelMessageHandler;
+use ln::msgs::{ChannelMessageHandler, LocalFeatures};
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::errors::APIError;
use bitcoin_hashes::sha256::Hash as Sha256;
use bitcoin_hashes::Hash;
-use std::time::Instant;
-
use ln::functional_test_utils::*;
#[test]
fn test_simple_monitor_permanent_update_fail() {
// Test that we handle a simple permanent monitor update failure
let mut nodes = create_network(2);
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
// Test that we can recover from a simple temporary monitor update failure optionally with
// a disconnect in between
let mut nodes = create_network(2);
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
// through, swapping message ordering based on disconnect_count & 8 and optionally
// disconnect/reconnecting based on disconnect_count.
let mut nodes = create_network(2);
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
fn test_monitor_update_fail_cs() {
// Tests handling of a monitor update failure when processing an incoming commitment_signed
let mut nodes = create_network(2);
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
// test_restore_channel_monitor() is required. Backported from
// chanmon_fail_consistency fuzz tests.
let mut nodes = create_network(2);
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
// Tests handling of an RAA while monitor updating has already been marked failed.
// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
let mut nodes = create_network(2);
- create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
send_payment(&nodes[0], &[&nodes[1]], 5000000);
fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
// Tests handling of a monitor update failure when processing an incoming RAA
let mut nodes = create_network(3);
- create_announced_chan_between_nodes(&nodes, 0, 1);
- let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
// Rebalance a bit so that we can send backwards from 2 to 1.
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
- assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 0));
+ assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
// channel_reestablish generating a monitor update (which comes from freeing holding cell
// HTLCs).
let mut nodes = create_network(3);
- create_announced_chan_between_nodes(&nodes, 0, 1);
- create_announced_chan_between_nodes(&nodes, 1, 2);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
_ => panic!("Unexpected event"),
}
}
+
+#[test]
+fn raa_no_response_awaiting_raa_state() {
+ // This is a rather convoluted test which ensures that if handling of an RAA does not happen
+ // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
+ // in question (assuming it intends to respond with a CS after monitor updating is restored).
+ // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+
+ // Queue up two payments - one will be delivered right away, one immediately goes into the
+ // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
+ // immediately after a CS. By failing the monitor update from the CS (which requires only an
+ // RAA response due to AwaitingRAA), we can deliver the RAA and then require CS generation
+ // during RAA handling while in the monitor-update-failed state.
+ nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
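+ // bs_responses is nodes[1]'s (revoke_and_ack, commitment_signed) pair for the first HTLC.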
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
+ // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
+ // then restore channel monitor updates.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ // nodes[1] should be AwaitingRAA here!
+ check_added_monitors!(nodes[1], 1);
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, 1000000);
+
+ // We send a third payment here, which is somewhat of a redundant test, but the
+ // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
+ // commitment transaction states) whereas here we can explicitly check for it.
+ nodes[0].node.send_payment(route.clone(), payment_hash_3).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
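+ // No monitor update or messages yet - the third HTLC sits in nodes[0]'s holding cell until
+ // nodes[1]'s RAA below frees it.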
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
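+ // nodes[1] has not yet received as_raa, so it is AwaitingRemoteRevoke and responds with only
+ // an RAA, holding its commitment update.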
+
+ // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+ let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_3, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+}
+
+#[test]
+fn claim_while_disconnected_monitor_update_fail() {
+ // Test for claiming a payment while disconnected and then having the resulting
+ // channel-update-generated monitor update fail. This kind of thing isn't a particularly
+ // contrived case for nodes with network instability.
+ // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+ // code introduced a regression in this test (specifically, this caught a removal of the
+ // channel_reestablish handling which ensured the ordering was sensible given the messages used).
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Forward a payment for B to claim
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
+ // update.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
+ // the monitor still failed
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+ // Note that nodes[1] not updating the monitor here is OK - it won't take action on the new HTLC
+ // until we've test_restore_channel_monitor'd and updated for the new commitment transaction.
+
+ // Now un-fail the monitor, which will result in B sending its original commitment update,
+ // receiving the commitment update from A, and the resulting commitment dances.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_msgs.len(), 2);
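+ // bs_msgs[0] is the freed holding-cell fulfill (with its commitment_signed); bs_msgs[1] is
+ // the RAA nodes[1] owes for the commitment_signed swallowed above.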
+
+ match bs_msgs[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ match bs_msgs[1] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+#[test]
+fn monitor_failed_no_reestablish_response() {
+ // Test for receiving a channel_reestablish after a monitor update failure resulted in no
+ // response to a commitment_signed.
+ // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
+ // debug_assert!() failure in channel_reestablish handling.
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Route the payment and deliver the initial commitment_signed (with a monitor update failure
+ // on receipt).
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
+ // is still failing to update monitors.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap();
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
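+ // With the monitor restored, nodes[1] can finally respond to the pre-disconnect
+ // commitment_signed with an RAA and commitment_signed of its own.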
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+}
+
+#[test]
+fn first_message_on_recv_ordering() {
+ // Test that if the initial generator of a monitor-update-frozen state doesn't generate
+ // messages, we're willing to flip the order of response messages if necessary in response to
+ // a commitment_signed which needs to send an RAA first.
+ // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
+ // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
+ // response. To do this, we start routing two payments, with the final RAA for the first being
+ // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
+ // have no pending response but will want to send a RAA/CS (with the updates for the second
+ // payment applied).
+ // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Route the first payment outbound, holding the last RAA for B until we are set up so that we
+ // can deliver it and fail the monitor update.
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ // Route the second payment, generating an update_add_htlc/commitment_signed
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+
+ // Deliver the final RAA for the first payment, which does not require a response. RAAs
+ // generally require a commitment_signed, so the fact that we're expecting an opposite response
+ // to the next message also tests resetting the delivery order.
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
+ // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
+ // the appropriate HTLC acceptance).
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, 1000000);
+
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+#[test]
+fn test_monitor_update_fail_claim() {
+ // Basic test for monitor update failures when processing claim_funds calls.
+ // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
+ // update to claim the payment. We then send a payment C->B->A, making the forward of this
+ // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
+ // updating and claim the payment on B.
+ let mut nodes = create_network(3);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance a bit so that we can send backwards from 2 to 1.
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+
+ let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
+ // paused, so the forward shouldn't succeed until we call test_restore_channel_monitor().
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+
+ let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+
+ let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
+
+ let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ match msg_events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+ assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+ assert_eq!(msg.contents.flags & 2, 2); // temp disabled
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+ assert_eq!(payment_hash, payment_hash_2);
+ assert!(!rejected_by_dest);
+ } else { panic!("Unexpected event!"); }
+
+ // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+ assert_eq!(payment_preimage, payment_preimage_1);
+ } else { panic!("Unexpected event!"); }
+}
+
+#[test]
+fn test_monitor_update_on_pending_forwards() {
+ // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
+ // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
+ // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
+ // from C to A will be pending a forward to A.
+ let mut nodes = create_network(3);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance a bit so that we can send backwards from 2 to 0.
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+ let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+ assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+
+ let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
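+ // The back-fail to nodes[0] is only queued as a pending HTLC failure here; nodes[1] won't
+ // send it until it processes its pending forwards (and the monitor update succeeds) below.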
+
+ let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+ assert_eq!(payment_hash, payment_hash_1);
+ assert!(rejected_by_dest);
+ } else { panic!("Unexpected event!"); }
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[0].node.process_pending_htlc_forwards();
+ expect_payment_received!(nodes[0], payment_hash_2, 1000000);
+
+ claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+}
+
+#[test]
+fn monitor_update_claim_fail_no_response() {
+ // Test for claim_funds resulting in both a monitor update failure and no message response (due
+ // to channel being AwaitingRAA).
+ // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+ // code was broken.
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Forward a payment for B to claim
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
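+ // This commitment_signed_dance variant returns nodes[0]'s final RAA rather than delivering
+ // it, so nodes[1] stays AwaitingRAA; we deliver it further down.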
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
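+ // Still nothing to send - the fulfill sits in the holding cell until nodes[1] receives the
+ // RAA below and is no longer AwaitingRAA.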
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}