X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchanmon_update_fail_tests.rs;fp=lightning%2Fsrc%2Fln%2Fchanmon_update_fail_tests.rs;h=15d46b04688930d03525b6347d6b714a28713b28;hb=7544030bb63fee6484fc178bb2ac8f382fe3b5b1;hp=ced4ceee95577007e51da30e8f703942f8619e04;hpb=7bc52aa62cb3c32f51b242ad3fa0ae16aee80cec;p=rust-lightning

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index ced4ceee..15d46b04 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -7,7 +7,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-//! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
+//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
 //! monitor updates.
 //! There are a bunch of these as their handling is relatively error-prone so they are split out
 //! here. See also the chanmon_fail_consistency fuzz test.
@@ -18,7 +18,7 @@ use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use chain::transaction::OutPoint;
-use chain::{ChannelMonitorUpdateErr, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
 use ln::channel::AnnouncementSigsState;
 use ln::msgs;
@@ -50,7 +50,7 @@ fn test_simple_monitor_permanent_update_fail() {
 create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
 
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {});
 check_added_monitors!(nodes[0], 2);
@@ -65,6 +65,8 @@ fn test_simple_monitor_permanent_update_fail() {
 _ => panic!("Unexpected event"),
 };
 
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
 // TODO: Once we hit the chain with the failure transaction we should check that we get a
 // PaymentPathFailed event
 
@@ -114,7 +116,7 @@ fn test_monitor_and_persister_update_fail() {
 &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
 assert!(new_monitor == *monitor);
 let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
- assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
+ assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 chain_mon
 };
 let header = BlockHeader {
@@ -127,8 +129,8 @@ fn test_monitor_and_persister_update_fail() {
 };
 chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
 
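Everywhere below the mechanical change is the same: Err(ChannelMonitorUpdateErr::...) returns become bare ChannelMonitorUpdateStatus values, and Ok(()) becomes ChannelMonitorUpdateStatus::Completed. As a rough sketch of what the three variants ask of a persister, assuming only what this diff itself shows (the StoreOutcome enum and status_for helper are invented for illustration, not rust-lightning API):

    use lightning::chain::ChannelMonitorUpdateStatus;

    // Hypothetical classification of a storage backend's write result.
    enum StoreOutcome { Written, WriteStillInFlight, StorageLost }

    fn status_for(outcome: StoreOutcome) -> ChannelMonitorUpdateStatus {
        match outcome {
            // Durably persisted: channel operation continues immediately.
            StoreOutcome::Written => ChannelMonitorUpdateStatus::Completed,
            // Write not yet confirmed: the channel is paused until the
            // ChainMonitor is told the update completed (these tests use
            // force_channel_monitor_updated for that).
            StoreOutcome::WriteStillInFlight => ChannelMonitorUpdateStatus::InProgress,
            // Unrecoverable: the channel gets force-closed.
            StoreOutcome::StorageLost => ChannelMonitorUpdateStatus::PermanentFailure,
        }
    }
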
- // Set the persister's return value to be a TemporaryFailure.
- persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ // Set the persister's return value to be an InProgress.
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 // Try to update ChannelMonitor
 nodes[1].node.claim_funds(preimage);
@@ -140,12 +142,12 @@ fn test_monitor_and_persister_update_fail() {
 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
 if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- // Check that even though the persister is returning a TemporaryFailure,
+ // Check that even though the persister is returning an InProgress,
 // because the update is bogus, ultimately the error that's returned
 // should be a PermanentFailure.
- if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
- logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Failed to persist ChannelMonitor update for channel [0-9a-f]*: TemporaryFailure").unwrap(), 1);
- if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+ if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
+ logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
 } else { assert!(false); }
 } else { assert!(false); };
 
@@ -165,10 +167,10 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
 let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
 
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 {
- unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {});
+ unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateInProgress, {});
 check_added_monitors!(nodes[0], 1);
 }
 
@@ -182,7 +184,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 }
 
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 check_added_monitors!(nodes[0], 0);
@@ -218,8 +220,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
 // Now set it to failed again...
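"Failed" in older comments like the one above now means in-progress: the send below surfaces APIError::MonitorUpdateInProgress instead of the removed MonitorUpdateFailed, and the HTLC simply waits for the monitor update to complete. A hedged caller-side sketch, limited to the variants these tests exercise:

    use lightning::util::errors::APIError;

    // Sketch: how a caller might interpret the two results seen in this file.
    fn describe(err: &APIError) -> &'static str {
        match err {
            // Not a failure: the HTLC is held and goes out once the pending
            // monitor update completes. Retrying could duplicate the payment.
            APIError::MonitorUpdateInProgress => "payment pending; do not retry",
            // The channel was closed out from under us (e.g. the
            // PermanentFailure case above); a retry must pick another route.
            APIError::ChannelUnavailable { .. } => "channel unusable; retry elsewhere",
            _ => "unrelated error",
        }
    }
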
let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); { - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {}); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateInProgress, {}); check_added_monitors!(nodes[0], 1); } @@ -259,8 +261,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // * First we route a payment, then get a temporary monitor update failure when trying to // route a second payment. We then claim the first payment. // * If disconnect_count is set, we will disconnect at this point (which is likely as - // TemporaryFailure likely indicates net disconnect which resulted in failing to update - // the ChannelMonitor on a watchtower). + // InProgress likely indicates net disconnect which resulted in failing to update the + // ChannelMonitor on a watchtower). // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment // immediately, otherwise we wait disconnect and deliver them via the reconnect // channel_reestablish processing (ie disconnect_count & 16 makes no sense if @@ -282,8 +284,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now try to send a second payment which will fail to send let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {}); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateInProgress, {}); check_added_monitors!(nodes[0], 1); } @@ -337,7 +339,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } // Now fix monitor updating... 
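"Fixing" monitor updating is the recurring three-step restore idiom of this file, quoted from the hunks below with annotations added (the chanmon_cfgs/persister plumbing is the test harness's, not public API):

    // 1. Make future persistence attempts report success.
    chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
    // 2. Recover the id of the update that was left pending...
    let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id
        .lock().unwrap().get(&channel_id).unwrap().clone();
    // 3. ...and tell the ChainMonitor it is now durable, which releases the
    //    channel to emit whatever messages it had queued up.
    nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
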
- chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -629,14 +631,14 @@ fn test_monitor_update_fail_cs() { let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -660,7 +662,7 @@ fn test_monitor_update_fail_cs() { assert!(updates.update_fee.is_none()); assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -670,7 +672,7 @@ fn test_monitor_update_fail_cs() { _ => panic!("Unexpected event"), } - chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -722,7 +724,7 @@ fn test_monitor_update_fail_no_rebroadcast() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -730,7 +732,7 @@ fn test_monitor_update_fail_no_rebroadcast() { 
assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); check_added_monitors!(nodes[1], 1); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -779,7 +781,7 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -788,10 +790,10 @@ fn test_monitor_update_raa_while_paused() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1); + nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1); check_added_monitors!(nodes[0], 1); - chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -871,7 +873,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. 
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -887,7 +889,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[0], 1); } - chanmon_cfgs[1].persister.set_update_ret(Ok(())); // We succeed in updating the monitor for the first channel + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); @@ -917,7 +919,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1121,7 +1123,7 @@ fn test_monitor_update_fail_reestablish() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); @@ -1158,7 +1160,7 @@ fn test_monitor_update_fail_reestablish() { get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1223,7 +1225,7 @@ fn raa_no_response_awaiting_raa_state() { // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA, // then restore channel monitor updates. 
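Why an RAA can go unanswered here: with a monitor update still pending, the responses an RAA would normally trigger depend on state that is not yet durably persisted, so they are queued instead. A toy model of that bookkeeping (invented types, not lightning's internals):

    // Toy: a peer that must defer RAA responses while a monitor update is
    // outstanding, then flush them once persistence completes.
    struct PausedPeer { update_pending: bool, queued: Vec<&'static str> }

    impl PausedPeer {
        fn on_revoke_and_ack(&mut self) -> Option<&'static str> {
            if self.update_pending {
                // cf. the log asserted above: "Existing pending monitor
                // update prevented responses to RAA"
                self.queued.push("commitment_signed");
                None
            } else {
                Some("commitment_signed")
            }
        }

        fn on_update_persisted(&mut self) -> Vec<&'static str> {
            self.update_pending = false;
            self.queued.drain(..).collect() // now safe to send to the peer
        }
    }
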
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1232,10 +1234,10 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1); + nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1); check_added_monitors!(nodes[1], 1); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); // nodes[1] should be AwaitingRAA here! @@ -1326,7 +1328,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); @@ -1353,7 +1355,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. 
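For reference, the "commitment dances" mentioned above are the fixed four-message round-trip that the commitment_signed_dance! macro drives throughout this file; as a plain message trace:

    // One full commitment dance, as driven by commitment_signed_dance!
    // (A has already sent its update_* message plus commitment_signed):
    const COMMITMENT_DANCE: [&str; 4] = [
        "A -> B: commitment_signed", // covers the new update
        "B -> A: revoke_and_ack",    // B revokes its prior commitment
        "B -> A: commitment_signed", // B signs A's matching commitment
        "A -> B: revoke_and_ack",    // A revokes, completing the exchange
    ];
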
- chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1432,7 +1434,7 @@ fn monitor_failed_no_reestablish_response() { check_added_monitors!(nodes[0], 1); } - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1458,7 +1460,7 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1532,7 +1534,7 @@ fn first_message_on_recv_ordering() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Deliver the final RAA for the first payment, which does not require a response. 
 // RAAs generally require a commitment_signed, so the fact that we're expecting an opposite response
@@ -1551,7 +1553,7 @@ fn first_message_on_recv_ordering() {
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 check_added_monitors!(nodes[1], 0);
@@ -1595,7 +1597,7 @@ fn test_monitor_update_fail_claim() {
 
 let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 nodes[1].node.claim_funds(payment_preimage_1);
 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
@@ -1614,7 +1616,7 @@ fn test_monitor_update_fail_claim() {
 
 // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
 // paused, so forward shouldn't succeed until we call channel_monitor_updated().
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
 let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
@@ -1725,13 +1727,13 @@ fn test_monitor_update_on_pending_forwards() {
 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
 commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
 nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 check_added_monitors!(nodes[1], 0);
@@ -1785,7 +1787,7 @@ fn monitor_update_claim_fail_no_response() {
 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
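The claim that follows is deliberately performed while persistence is pending. As the "Temporary failure claiming HTLC, treating as success" log asserted earlier in this file spells out, a slow persist must not fail a claim: the update carrying the preimage is queued and applies once the store catches up. Sketched as a policy function (illustrative only):

    use lightning::chain::ChannelMonitorUpdateStatus;

    fn claim_outcome(persist: ChannelMonitorUpdateStatus) -> &'static str {
        match persist {
            ChannelMonitorUpdateStatus::Completed => "preimage persisted; claim settled",
            // "Temporary failure claiming HTLC, treating as success": the
            // preimage sits in the queued update and must not be forgotten.
            ChannelMonitorUpdateStatus::InProgress => "claim recorded; settles once persisted",
            ChannelMonitorUpdateStatus::PermanentFailure => "channel closes; claim enforced on chain",
        }
    }
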
nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 1); @@ -1794,7 +1796,7 @@ fn monitor_update_claim_fail_no_response() { assert_eq!(events.len(), 0); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1832,19 +1834,19 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -1884,7 +1886,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1956,19 +1958,19 @@ fn test_path_paused_mpp() { // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second // (for the path 0 -> 2 -> 3) fails. 
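The caller-visible shape of that mixed outcome, restated as a sketch around the assertions below (a PartialFailure carries one Result per path, and the successful path is exactly what makes a blind retry unsafe):

    use lightning::ln::channelmanager::PaymentSendFailure;
    use lightning::util::errors::APIError;

    // Sketch of handling a partial MPP failure; send_result stands in for
    // the send_payment call in the test below.
    fn handle(send_result: Result<(), PaymentSendFailure>) {
        if let Err(PaymentSendFailure::PartialFailure { results, .. }) = send_result {
            for path_result in &results {
                match path_result {
                    Ok(()) => { /* shard in flight; payment may still complete */ }
                    Err(APIError::MonitorUpdateInProgress) =>
                        { /* shard parked behind the pending update; no retry */ }
                    Err(_) => { /* other per-path errors */ }
                }
            }
        }
    }
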
- chanmon_cfgs[0].persister.set_update_ret(Ok(())); - chanmon_cfgs[0].persister.set_next_update_ret(Some(Err(ChannelMonitorUpdateErr::TemporaryFailure))); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + chanmon_cfgs[0].persister.set_next_update_ret(Some(ChannelMonitorUpdateStatus::InProgress)); // Now check that we get the right return value, indicating that the first path succeeded but - // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as - // some paths succeeded, preventing retry. + // the second got a MonitorUpdateInProgress err. This implies + // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry. if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { assert_eq!(results.len(), 2); if let Ok(()) = results[0] {} else { panic!(); } - if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); } + if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); } } else { panic!(); } check_added_monitors!(nodes[0], 2); - chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2265,7 +2267,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 0); - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.claim_funds(payment_preimage_0); check_added_monitors!(nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); @@ -2315,7 +2317,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[0].node = &nodes_0_deserialized; assert!(nodes_0_read.is_empty()); - nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap(); + assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor), + ChannelMonitorUpdateStatus::Completed); check_added_monitors!(nodes[0], 1); } else { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); @@ -2359,7 +2362,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). 
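Reload is one more spot where the status type replaces a Result: when the deserialized monitor is handed back to the chain watcher in the hunk above, the test now asserts the returned status directly; a healthy persister reports Completed synchronously. Quoted with an annotation:

    // Re-registering a monitor after deserialization reports persistence
    // status rather than Result, so the old .unwrap() becomes an assert_eq.
    assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
        ChannelMonitorUpdateStatus::Completed);
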
- chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id); @@ -2558,8 +2561,8 @@ fn test_temporary_error_during_shutdown() { let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap(); nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); @@ -2570,8 +2573,8 @@ fn test_temporary_error_during_shutdown() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - chanmon_cfgs[0].persister.set_update_ret(Ok(())); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); @@ -2579,7 +2582,7 @@ fn test_temporary_error_during_shutdown() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); @@ -2612,7 +2615,7 @@ fn test_permanent_error_during_sending_shutdown() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; - chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure); assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok()); check_closed_broadcast!(nodes[0], true); @@ -2633,7 +2636,7 @@ fn test_permanent_error_during_handling_shutdown() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure); assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok()); let shutdown = 
get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); @@ -2656,20 +2659,20 @@ fn double_temp_error() { let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // `claim_funds` results in a ChannelMonitorUpdate. nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. nodes[1].node.claim_funds(payment_preimage_2); check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000); - chanmon_cfgs[1].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
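
The diff continues beyond this excerpt, but the shutdown and double-failure tests above already pin down the operational meaning of each status. As a closing summary sketch (one line per status, reflecting only behavior asserted in this file):

    use lightning::chain::ChannelMonitorUpdateStatus;

    fn effect_during_shutdown(status: ChannelMonitorUpdateStatus) -> &'static str {
        match status {
            ChannelMonitorUpdateStatus::Completed =>
                "cooperative close proceeds normally",
            ChannelMonitorUpdateStatus::InProgress =>
                "close quietly stalls until the pending update is completed",
            ChannelMonitorUpdateStatus::PermanentFailure =>
                "channel is force-closed, as test_permanent_error_during_sending_shutdown asserts",
        }
    }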