X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchanmon_update_fail_tests.rs;h=a6c5649c1d954e5b76f7efe8e28dd675d9d76c31;hb=010c34f351f4ae37c419fc5d1c55c60b637bf241;hp=4a1695a9b86e7adcb2058b40ef0657a684ce8c9d;hpb=0a5ccd1f13add92ed949bd3f75f5dff18cd0ba7e;p=rust-lightning diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 4a1695a9..a6c5649c 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -16,15 +16,14 @@ use bitcoin::blockdata::block::{Block, BlockHeader}; use bitcoin::blockdata::constants::genesis_block; use bitcoin::hash_types::BlockHash; use bitcoin::network::constants::Network; -use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr}; +use chain::channelmonitor::ChannelMonitor; use chain::transaction::OutPoint; -use chain::Listen; -use chain::Watch; -use ln::{PaymentPreimage, PaymentHash}; +use chain::{ChannelMonitorUpdateErr, Listen, Watch}; use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure}; +use ln::channel::AnnouncementSigsState; use ln::features::InitFeatures; use ln::msgs; -use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler}; +use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use util::config::UserConfig; use util::enforcing_trait_impls::EnforcingSigner; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason}; @@ -32,9 +31,6 @@ use util::errors::APIError; use util::ser::{ReadableArgs, Writeable}; use util::test_utils::TestBroadcaster; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; - use ln::functional_test_utils::*; use util::test_utils; @@ -43,22 +39,17 @@ use io; use prelude::*; use sync::{Arc, Mutex}; -// If persister_fail is true, we have the persister return a PermanentFailure -// instead of the higher-level ChainMonitor. 
-fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) { +#[test] +fn test_simple_monitor_permanent_update_fail() { // Test that we handle a simple permanent monitor update failure - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); - - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure)) - } + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {}); check_added_monitors!(nodes[0], 2); @@ -115,8 +106,7 @@ fn test_monitor_and_persister_update_fail() { blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 200); 200])), }; let chain_mon = { - let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap(); - let monitor = monitors.get(&outpoint).unwrap(); + let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap(); let mut w = test_utils::TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); let new_monitor = <(BlockHash, ChannelMonitor)>::read( @@ -144,7 +134,7 @@ fn test_monitor_and_persister_update_fail() { // because the update is bogus, ultimately the error that's returned // should be a PermanentFailure. if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); } - logger.assert_log_contains("lightning::chain::chainmonitor".to_string(), "Failed to persist channel monitor update: TemporaryFailure".to_string(), 1); + logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Failed to persist ChannelMonitor update for channel [0-9a-f]*: TemporaryFailure").unwrap(), 1); if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); } } else { assert!(false); } } else { assert!(false); }; @@ -154,20 +144,10 @@ fn test_monitor_and_persister_update_fail() { assert_eq!(events.len(), 1); } -#[test] -fn test_simple_monitor_permanent_update_fail() { - do_test_simple_monitor_permanent_update_fail(false); - - // Test behavior when the persister returns a PermanentFailure. - do_test_simple_monitor_permanent_update_fail(true); -} - -// If persister_fail is true, we have the persister return a TemporaryFailure instead of the -// higher-level ChainMonitor. 
-fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail: bool) { +fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // Test that we can recover from a simple temporary monitor update failure optionally with // a disconnect in between - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -175,10 +155,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)) - } + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); { unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {}); @@ -195,12 +172,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Ok(())), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())) - } - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -234,10 +208,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail // Now set it to failed again... let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); { - match persister_fail { - true => chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)), - false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)) - } + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {}); check_added_monitors!(nodes[0], 1); } @@ -266,12 +237,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail #[test] fn test_simple_monitor_temporary_update_fail() { - do_test_simple_monitor_temporary_update_fail(false, false); - do_test_simple_monitor_temporary_update_fail(true, false); - - // Test behavior when the persister returns a TemporaryFailure. 
- do_test_simple_monitor_temporary_update_fail(false, true); - do_test_simple_monitor_temporary_update_fail(true, true); + do_test_simple_monitor_temporary_update_fail(false); + do_test_simple_monitor_temporary_update_fail(true); } fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { @@ -305,7 +272,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now try to send a second payment which will fail to send let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {}); check_added_monitors!(nodes[0], 1); } @@ -334,7 +301,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => { assert_eq!(*payment_preimage, payment_preimage_1); assert_eq!(*payment_hash, payment_hash_1); }, @@ -358,9 +325,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } // Now fix monitor updating... - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); macro_rules! disconnect_reconnect_peers { () => { { @@ -427,7 +394,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash } => { + Event::PaymentSent { ref payment_preimage, ref payment_hash, .. 
} => { assert_eq!(*payment_preimage, payment_preimage_1); assert_eq!(*payment_hash, payment_hash_1); }, @@ -581,6 +548,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); expect_pending_htlcs_forwardable!(nodes[1]); @@ -649,16 +617,16 @@ fn test_monitor_update_fail_cs() { let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -680,7 +648,7 @@ fn test_monitor_update_fail_cs() { assert!(updates.update_fee.is_none()); assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -690,9 +658,9 @@ fn test_monitor_update_fail_cs() { _ => panic!("Unexpected event"), } - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -742,7 +710,7 @@ fn test_monitor_update_fail_no_rebroadcast() { 
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -750,9 +718,9 @@ fn test_monitor_update_fail_no_rebroadcast() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); check_added_monitors!(nodes[1], 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -799,7 +767,7 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -811,9 +779,9 @@ fn test_monitor_update_raa_while_paused() { nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1); check_added_monitors!(nodes[0], 1); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -891,7 +859,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. 
- *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -907,7 +875,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[0], 1); } - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); // We succeed in updating the monitor for the first channel + chanmon_cfgs[1].persister.set_update_ret(Ok(())); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); @@ -937,9 +905,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); @@ -1120,12 +1088,12 @@ fn test_monitor_update_fail_reestablish() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - assert!(nodes[2].node.claim_funds(our_payment_preimage)); + assert!(nodes[2].node.claim_funds(payment_preimage)); check_added_monitors!(nodes[2], 1); let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -1139,7 +1107,7 @@ fn test_monitor_update_fail_reestablish() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); @@ -1176,9 +1144,9 @@ fn 
test_monitor_update_fail_reestablish() { get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1189,13 +1157,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage), - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage); } #[test] @@ -1247,7 +1209,7 @@ fn raa_no_response_awaiting_raa_state() { // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA, // then restore channel monitor updates. - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1259,9 +1221,9 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1); check_added_monitors!(nodes[1], 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); // nodes[1] should be AwaitingRAA here! 
check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1330,7 +1292,7 @@ fn claim_while_disconnected_monitor_update_fail() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; // Forward a payment for B to claim - let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); @@ -1349,7 +1311,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); @@ -1376,9 +1338,9 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1425,16 +1387,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash } => { - assert_eq!(*payment_preimage, payment_preimage_1); - assert_eq!(*payment_hash, payment_hash_1); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -1450,6 +1403,11 @@ fn monitor_failed_no_reestablish_response() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; + { + let mut lock; + get_channel_ref!(nodes[0], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; + get_channel_ref!(nodes[1], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived; + } // Route the payment and deliver the initial commitment_signed (with a monitor update failure // on receipt). 
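[Editor's note on the hunk above: the added block presets `announcement_sigs_state` on both ends of the channel via `get_channel_ref!`, presumably so the reconnect performed later in `monitor_failed_no_reestablish_response` does not also queue `announcement_signatures` and trip the assertions that no message events are pending. A minimal sketch of that pattern, assuming the crate's functional_test_utils macros and the `AnnouncementSigsState` import added at the top of this diff; it is illustrative, not standalone:]

    {
        let mut lock;
        // Pretend both peers already exchanged announcement_signatures so the
        // reconnect in this test does not regenerate them mid-assertion.
        get_channel_ref!(nodes[0], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
        get_channel_ref!(nodes[1], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
    }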
@@ -1459,7 +1417,7 @@ fn monitor_failed_no_reestablish_response() { check_added_monitors!(nodes[0], 1); } - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1485,9 +1443,9 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1559,7 +1517,7 @@ fn first_message_on_recv_ordering() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // Deliver the final RAA for the first payment, which does not require a response. 
RAAs // generally require a commitment_signed, so the fact that we're expecting an opposite response @@ -1578,9 +1536,9 @@ fn first_message_on_recv_ordering() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1622,7 +1580,7 @@ fn test_monitor_update_fail_claim() { let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[1].node.claim_funds(payment_preimage_1)); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); @@ -1640,7 +1598,7 @@ fn test_monitor_update_fail_claim() { // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1663,8 +1621,8 @@ fn test_monitor_update_fail_claim() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
- let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1751,15 +1709,15 @@ fn test_monitor_update_on_pending_forwards() { nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1796,7 +1754,7 @@ fn monitor_update_claim_fail_no_response() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; // Forward a payment for B to claim - let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); @@ -1811,16 +1769,16 @@ fn monitor_update_claim_fail_no_response() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[1].node.claim_funds(payment_preimage_1)); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = 
nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1832,23 +1790,14 @@ fn monitor_update_claim_fail_no_response() { let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { ref payment_preimage, ref payment_hash } => { - assert_eq!(*payment_preimage, payment_preimage_1); - assert_eq!(*payment_hash, payment_hash_1); - }, - _ => panic!("Unexpected event"), - } + expect_payment_sent!(nodes[0], payment_preimage_1); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } -// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and // restore_b_before_conf has no meaning if !confirm_a_first -fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool) { +// restore_b_before_lock has no meaning if confirm_a_first +fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) { // Test that if the monitor update generated by funding_transaction_generated fails we continue // the channel setup happily after the update is restored. 
let chanmon_cfgs = create_chanmon_cfgs(2); @@ -1865,21 +1814,21 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[0], 0); let events = nodes[0].node.get_and_clear_pending_events(); @@ -1890,6 +1839,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: if confirm_a_first { confirm_transaction(&nodes[0], &funding_tx); nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } else { assert!(!restore_b_before_conf); confirm_transaction(&nodes[1], &funding_tx); @@ -1908,20 +1859,32 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } + if !confirm_a_first && !restore_b_before_lock { + confirm_transaction(&nodes[0], &funding_tx); + nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + } - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - let (outpoint, latest_update) = 
nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { - nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())); - - confirm_transaction(&nodes[0], &funding_tx); - let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); - (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked)) + if !restore_b_before_lock { + let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); + (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked)) + } else { + nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())); + confirm_transaction(&nodes[0], &funding_tx); + let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); + (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked)) + } } else { if restore_b_before_conf { + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); confirm_transaction(&nodes[1], &funding_tx); } let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); @@ -1941,9 +1904,10 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: #[test] fn during_funding_monitor_fail() { - do_during_funding_monitor_fail(true, true); - do_during_funding_monitor_fail(true, false); - do_during_funding_monitor_fail(false, false); + do_during_funding_monitor_fail(true, true, false); + do_during_funding_monitor_fail(true, false, false); + do_during_funding_monitor_fail(false, false, false); + do_during_funding_monitor_fail(false, false, true); } #[test] @@ -1974,19 +1938,19 @@ fn test_path_paused_mpp() { // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second // (for the path 0 -> 2 -> 3) fails. - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); - *nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[0].persister.set_next_update_ret(Some(Err(ChannelMonitorUpdateErr::TemporaryFailure))); // Now check that we get the right return value, indicating that the first path succeeded but // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as // some paths succeeded, preventing retry. 
- if let Err(PaymentSendFailure::PartialFailure(results)) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { + if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) { assert_eq!(results.len(), 2); if let Ok(()) = results[0] {} else { panic!(); } if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); } } else { panic!(); } check_added_monitors!(nodes[0], 2); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1995,8 +1959,8 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); @@ -2237,7 +2201,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap(); check_added_monitors!(nodes[0], 0); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); assert!(nodes[0].node.claim_funds(payment_preimage_0)); check_added_monitors!(nodes[0], 1); @@ -2257,7 +2221,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { if reload_a { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap(); persister = test_utils::TestPersister::new(); let keys_manager = &chanmon_cfgs[0].keys_manager; @@ -2330,9 +2294,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). - *nodes[0].chain_monitor.update_ret.lock().unwrap() = None; - let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&funding_txo, mon_id); + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id); // New outbound messages should be generated immediately upon a call to // get_and_clear_pending_msg_events (but not before). 
@@ -2353,7 +2317,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[1], payment_preimage_0); + expect_payment_sent_without_paths!(nodes[1], payment_preimage_0); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); updates.commitment_signed @@ -2372,7 +2336,18 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); - expect_pending_htlcs_forwardable!(nodes[1]); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + match events[1] { + Event::PaymentPathSuccessful { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.process_pending_htlc_forwards(); expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); @@ -2457,9 +2432,10 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); if htlc_status == HTLCStatusAtDupClaim::Cleared { commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); + expect_payment_path_successful!(nodes[0]); } } else { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2483,10 +2459,11 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent_without_paths!(nodes[0], payment_preimage); } if htlc_status != HTLCStatusAtDupClaim::Cleared { commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); + expect_payment_path_successful!(nodes[0]); } } @@ -2514,8 +2491,8 @@ fn test_temporary_error_during_shutdown() { let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); nodes[0].node.close_channel(&channel_id).unwrap(); nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), 
&get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); @@ -2526,18 +2503,18 @@ fn test_temporary_error_during_shutdown() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[0].chain_monitor.update_ret.lock().unwrap() = None; - *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; + chanmon_cfgs[0].persister.set_update_ret(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); - let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = None; - let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); @@ -2568,7 +2545,7 @@ fn test_permanent_error_during_sending_shutdown() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure)); + chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); assert!(nodes[0].node.close_channel(&channel_id).is_ok()); check_closed_broadcast!(nodes[0], true); @@ -2589,7 +2566,7 @@ fn test_permanent_error_during_handling_shutdown() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure)); assert!(nodes[0].node.close_channel(&channel_id).is_ok()); let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); @@ -2612,24 +2589,24 @@ fn double_temp_error() { let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // `claim_funds` 
results in a ChannelMonitorUpdate. assert!(nodes[1].node.claim_funds(payment_preimage_1)); check_added_monitors!(nodes[1], 1); - let (funding_tx, latest_update_1) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. assert!(nodes[1].node.claim_funds(payment_preimage_2)); check_added_monitors!(nodes[1], 1); - *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(())); + chanmon_cfgs[1].persister.set_update_ret(Ok(())); - let (_, latest_update_2) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_1); + let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); - nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2); // Complete the first HTLC. let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -2650,7 +2627,7 @@ fn double_temp_error() { assert_eq!(node_id, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1); check_added_monitors!(nodes[0], 0); - expect_payment_sent!(nodes[0], payment_preimage_1); + expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1); check_added_monitors!(nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); @@ -2688,6 +2665,7 @@ fn double_temp_error() { }; nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2); check_added_monitors!(nodes[0], 0);
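
[Editor's note: taken together, the hunks above migrate these tests from poking `chain_monitor.update_ret` and calling `ChannelManager::channel_monitor_updated` to injecting failures through the test persister and completing them on the ChainMonitor directly. A condensed sketch of the resulting pattern, using only calls that appear in this diff; it relies on the crate's functional_test_utils harness and is illustrative rather than standalone:]

    // Make the next monitor persistence attempt fail temporarily.
    chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
    unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)),
        false, APIError::MonitorUpdateFailed, {});
    check_added_monitors!(nodes[0], 1);

    // Let persistence succeed again, then tell the ChainMonitor the pending update completed.
    // latest_monitor_update_id now yields a three-element tuple; the third field is unused here.
    chanmon_cfgs[0].persister.set_update_ret(Ok(()));
    let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id
        .lock().unwrap().get(&channel_id).unwrap().clone();
    nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
    check_added_monitors!(nodes[0], 0);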