From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Date: Thu, 24 Jan 2019 18:16:33 +0000 (-0500)
Subject: Merge pull request #288 from TheBlueMatt/2019-01-test-split-raa-flag-fix
X-Git-Tag: v0.0.12~235
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=a6f0281017f1223ef961fb3a000915e37b5719b0;hp=d7ac68701f6d169438b0337919fde7c715b2e94c;p=rust-lightning

Merge pull request #288 from TheBlueMatt/2019-01-test-split-raa-flag-fix

Fix several more monitor-update-failed cases
---

diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs
index cd1df026..c9153073 100644
--- a/src/ln/chanmon_update_fail_tests.rs
+++ b/src/ln/chanmon_update_fail_tests.rs
@@ -987,3 +987,381 @@ fn test_monitor_update_fail_reestablish() {
 _ => panic!("Unexpected event"),
 }
 }
+
+#[test]
+fn raa_no_response_awaiting_raa_state() {
+ // This is a rather convoluted test which ensures that if handling of an RAA does not happen
+ // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
+ // in question (assuming it intends to respond with a CS after monitor updating is restored).
+ // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+
+ // Queue up two payments - one will be delivered right away, one immediately goes into the
+ // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
+ // immediately after a CS. By failing the monitor update from the CS (which requires only an
+ // RAA response due to AwaitingRAA) we can deliver the RAA and require CS generation during
+ // RAA handling while in monitor-update-failed state.
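+ // A rough sketch of the intended flow (A = nodes[0], B = nodes[1]):
+ //   A -> B: update_add_htlc + commitment_signed for payment 1; B -> A: RAA + CS
+ //   B's RAA frees payment 2 from A's holding cell, so A -> B: update_add_htlc + CS
+ //   B's monitor update fails on that CS (owing only an RAA), then again on A's RAA
+ //   restoring B's monitor must leave the channel AwaitingRemoteRevoke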
+ nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap(); + check_added_monitors!(nodes[0], 1); + nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap(); + check_added_monitors!(nodes[0], 0); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[1], 1); + + let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap(); + check_added_monitors!(nodes[0], 1); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from + // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA, + // then restore channel monitor updates. + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + check_added_monitors!(nodes[1], 1); + + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() { + assert_eq!(err, "Previous monitor update failure prevented responses to RAA"); + } else { panic!(); } + check_added_monitors!(nodes[1], 1); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + // nodes[1] should be AwaitingRAA here! + check_added_monitors!(nodes[1], 1); + let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash_1, 1000000); + + // We send a third payment here, which is somewhat of a redundant test, but the + // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync + // commitment transaction states) whereas here we can explicitly check for it. 
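+ // nodes[0] is still waiting on nodes[1]'s RAA at this point, so this third send should go
+ // straight into the holding cell, adding no monitor update and generating no messages (as
+ // the next two assertions check).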
+ nodes[0].node.send_payment(route.clone(), payment_hash_3).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+ let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_3, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+}
+
+#[test]
+fn claim_while_disconnected_monitor_update_fail() {
+ // Test for claiming a payment while disconnected and then having the resulting
+ // channel-update-generated monitor update fail. This isn't a particularly contrived scenario
+ // for nodes with network instability.
+ // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+ // code introduced a regression in this test (specifically, this caught a removal of the
+ // channel_reestablish handling ensuring the order was sensible given the messages used).
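+ // Outline: B claims the payment while disconnected, leaving the update_fulfill in its
+ // holding cell. On reconnect, handling A's channel_reestablish would flush that holding
+ // cell, and we fail B's monitor update at exactly that point before working through the
+ // recovery.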
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Forward a payment for B to claim
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now deliver A's reestablish, freeing the claim from the holding cell, but fail the monitor
+ // update.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
+ // the monitor still failed
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+ // Note that nodes[1] not updating the monitor here is OK - it won't take action on the new
+ // HTLC until we've test_restore_channel_monitor'd and updated for the new commitment
+ // transaction.
+
+ // Now un-fail the monitor, which will result in B sending its original commitment update,
+ // receiving the commitment update from A, and the resulting commitment dances.
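+ // Concretely, once restored B should emit two events in order: the held
+ // update_fulfill_htlc/commitment_signed for payment 1, then the RAA it owes in response
+ // to A's commitment_signed covering payment 2 (both checked below).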
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); + + let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_msgs.len(), 2); + + match bs_msgs[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[1], 1); + }, + _ => panic!("Unexpected event"), + } + + match bs_msgs[1] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg).unwrap(); + check_added_monitors!(nodes[0], 1); + }, + _ => panic!("Unexpected event"), + } + + let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[1], 1); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash_2, 1000000); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); +} + +#[test] +fn monitor_failed_no_reestablish_response() { + // Test for receiving a channel_reestablish after a monitor update failure resulted in no + // response to a commitment_signed. + // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing + // debug_assert!() failure in channel_reestablish handling. + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + // Route the payment and deliver the initial commitment_signed (with a monitor update failure + // on receipt). 
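+ // On the commitment_signed delivery below, nodes[1] will owe an RAA/CS response which it
+ // cannot generate while its monitor update is failing; the reconnection then exercises
+ // channel_reestablish handling in exactly that state.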
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
+ // is still failing to update monitors.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap();
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+}
+
+#[test]
+fn first_message_on_recv_ordering() {
+ // Test that if the initial generator of a monitor-update-frozen state doesn't generate
+ // messages, we're willing to flip the order of response messages if necessary in response
+ // to a commitment_signed which needs to send an RAA first.
+ // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
+ // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
+ // response.
+ // To do this, we start routing two payments, with the final RAA for the first being
+ // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
+ // have no pending response but will want to send a RAA/CS (with the updates for the second
+ // payment applied).
+ // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Route the first payment outbound, holding the last RAA for B until we are set up so that we
+ // can deliver it and fail the monitor update.
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ // Route the second payment, generating an update_add_htlc/commitment_signed
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+
+ // Deliver the final RAA for the first payment, which does not require a response. RAAs
+ // generally require a commitment_signed, so the fact that we're expecting an opposite response
+ // to the next message also tests resetting the delivery order.
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need
+ // an RAA/CS response; that response should be generated when we call
+ // test_restore_channel_monitor (along with the appropriate HTLC acceptance).
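+ // Since the earlier RAA failure required no response, handling this CS while still
+ // monitor-failed should queue an RAA-first response order for when the monitor is
+ // restored, rather than assuming a commitment_signed-first order.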
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() { + assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); + } else { panic!(); } + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash_1, 1000000); + + let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap(); + check_added_monitors!(nodes[0], 1); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap(); + check_added_monitors!(nodes[0], 1); + + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[1], 1); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_received!(nodes[1], payment_hash_2, 1000000); + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); +} diff --git a/src/ln/channel.rs b/src/ln/channel.rs index c6dc595f..61761078 100644 --- a/src/ln/channel.rs +++ b/src/ln/channel.rs @@ -1790,8 +1790,24 @@ impl Channel { self.received_commitment_while_awaiting_raa = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) != 0; if (self.channel_state & ChannelState::MonitorUpdateFailed as u32) != 0 { + // In case we initially failed monitor updating without requiring a response, we need + // to make sure the RAA gets sent first. + if !self.monitor_pending_commitment_signed { + self.monitor_pending_order = Some(RAACommitmentOrder::RevokeAndACKFirst); + } self.monitor_pending_revoke_and_ack = true; - self.monitor_pending_commitment_signed |= need_our_commitment; + if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { + // If we were going to send a commitment_signed after the RAA, go ahead and do all + // the corresponding HTLC status updates so that get_last_commitment_update + // includes the right HTLCs. + // Note that this generates a monitor update that we ignore! This is OK since we + // won't actually send the commitment_signed that generated the update to the other + // side until the latest monitor has been pulled from us and stored. + self.monitor_pending_commitment_signed = true; + self.send_commitment_no_status_check()?; + } + // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and + // re-send the message on restoration) return Err(ChannelError::Ignore("Previous monitor update failure prevented generation of RAA")); } @@ -2039,6 +2055,10 @@ impl Channel { // cells) while we can't update the monitor, so we just return what we have. if require_commitment { self.monitor_pending_commitment_signed = true; + // When the monitor updating is restored we'll call get_last_commitment_update(), + // which does not update state, but we're definitely now awaiting a remote revoke + // before we can step forward any more, so set it here. 
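+ // (AwaitingRemoteRevoke is used as a bit flag in channel_state, so the OR below just
+ // sets that one flag and leaves the rest of the state machine untouched.)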
+ self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
 }
 self.monitor_pending_forwards.append(&mut to_forward_infos);
 self.monitor_pending_failures.append(&mut revoked_htlcs);
@@ -2421,9 +2441,6 @@ impl Channel {
 log_debug!(self, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id()));
 }
- // If monitor_pending_order is set, it must be CommitmentSigned if we have no RAA
- debug_assert!(self.monitor_pending_order != Some(RAACommitmentOrder::RevokeAndACKFirst) || required_revoke.is_some());
-
 if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
 self.monitor_pending_commitment_signed = true;
 return Ok((resend_funding_locked, None, None, None, order, shutdown_msg));
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index b824b99b..912e22ab 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -471,6 +471,12 @@ macro_rules! return_monitor_err {
 return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
 },
 ChannelMonitorUpdateErr::TemporaryFailure => {
+ // Sanity-check the requested resend order: a message type we aren't going to
+ // resend must not be ordered ahead of one we are.
+ if !$resend_commitment {
+ debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
+ }
+ if !$resend_raa {
+ debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
+ }
 $entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
 return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
 },