X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchanmon_update_fail_tests.rs;h=b8eb11b527fff1d162bf77242112615ede06c4c4;hb=dddb2e28c1e2586346d94775d5bb63414c6a73bb;hp=385f5b5353ca5f697880c441061de805cfbc2d28;hpb=c86950d5104666b059c4fbe1248afc04610c45c5;p=rust-lightning

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 385f5b53..b8eb11b5 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -19,12 +19,12 @@ use bitcoin::network::constants::Network;
 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
-use crate::ln::channelmanager::{ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId};
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
+use crate::ln::channelmanager::{ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
 use crate::ln::channel::AnnouncementSigsState;
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::util::errors::APIError;
 use crate::util::ser::{ReadableArgs, Writeable};
 use crate::util::test_utils::TestBroadcaster;
@@ -50,7 +50,9 @@ fn test_simple_monitor_permanent_update_fail() {
 	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
 
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)), true, APIError::ChannelUnavailable {..}, {});
+	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
+		), true, APIError::ChannelUnavailable {..}, {});
 	check_added_monitors!(nodes[0], 2);
 
 	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
@@ -108,12 +110,13 @@ fn test_monitor_and_persister_update_fail() {
 		blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
 	};
 	let chain_mon = {
-		let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-		let mut w = test_utils::TestVecWriter(Vec::new());
-		monitor.write(&mut w).unwrap();
-		let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
-			&mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
-		assert!(new_monitor == *monitor);
+		let new_monitor = {
+			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+			let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+			assert!(new_monitor == *monitor);
+			new_monitor
+		};
 		let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 		assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 		chain_mon
@@ -143,12 +146,12 @@ fn test_monitor_and_persister_update_fail() {
 		let mut node_0_per_peer_lock; let mut node_0_peer_state_lock;
 		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-		if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+		if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 			// Check that even though the persister is returning a InProgress,
 			// because the update is bogus, ultimately the error that's returned
 			// should be a PermanentFailure.
 			if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
-			logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+			logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
 			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
 		} else { assert!(false); }
 	}
@@ -172,7 +175,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 	{
-		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)), false, APIError::MonitorUpdateInProgress, {});
+		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+				RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
+			), false, APIError::MonitorUpdateInProgress, {});
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -181,8 +186,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 
 	if disconnect {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
@@ -225,7 +230,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
 	{
 		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)), false, APIError::MonitorUpdateInProgress, {});
+		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+				RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
+			), false, APIError::MonitorUpdateInProgress, {});
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -234,8 +241,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 
 	if disconnect {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
@@ -289,7 +296,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
 		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)), false, APIError::MonitorUpdateInProgress, {});
+		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+				RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
+			), false, APIError::MonitorUpdateInProgress, {});
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -337,8 +346,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	};
 
 	if disconnect_count & !disconnect_flags > 0 {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	}
 
 	// Now fix monitor updating...
@@ -348,13 +357,13 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	check_added_monitors!(nodes[0], 0);
 
 	macro_rules! disconnect_reconnect_peers { () => { {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 		assert_eq!(reestablish_1.len(), 1);
-		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 		assert_eq!(reestablish_2.len(), 1);
@@ -373,10 +382,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
@@ -629,7 +638,8 @@ fn test_monitor_update_fail_cs() {
 	let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
+			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -721,7 +731,8 @@ fn test_monitor_update_fail_no_rebroadcast() {
 	let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, our_payment_hash, &Some(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
+			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -768,14 +779,16 @@ fn test_monitor_update_raa_while_paused() {
 	send_payment(&nodes[0], &[&nodes[1]], 5000000);
 
 	let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, our_payment_hash_1, &Some(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
+			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
 	let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 
 	let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
 	{
-		nodes[1].node.send_payment(&route, our_payment_hash_2, &Some(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
+		nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
+			RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[1], 1);
 	}
 	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
@@ -863,7 +876,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// holding cell.
 	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
 	{
-		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -887,7 +901,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// being paused waiting a monitor update.
 	let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
 	{
-		nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_3,
+			RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -906,7 +921,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
 		// Try to route another payment backwards from 2 to make sure 1 holds off on responding
 		let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
-		nodes[2].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
+		nodes[2].node.send_payment_with_route(&route, payment_hash_4,
+			RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
 		check_added_monitors!(nodes[2], 1);
 
 		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
@@ -935,7 +951,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// Note that the ordering of the events for different nodes is non-prescriptive, though the
 	// ordering of the two events that both go to nodes[2] have to stay in the same order.
-	let (nodes_0_event, events_3) = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &events_3);
+	let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
 	let messages_a = match nodes_0_event {
 		MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
 			assert_eq!(node_id, nodes[0].node.get_our_node_id());
@@ -949,12 +965,12 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		_ => panic!("Unexpected event type!"),
 	};
 
-	let (nodes_2_event, events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+	let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
 	let send_event_b = SendEvent::from_event(nodes_2_event);
 	assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
 
 	let raa = if test_ignore_second_cs {
-		let (nodes_2_event, _events_3) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &events_3);
+		let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
 		match nodes_2_event {
 			MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
 				assert_eq!(node_id, nodes[2].node.get_our_node_id());
@@ -1110,8 +1126,8 @@ fn test_monitor_update_fail_reestablish() {
 	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[2].node.claim_funds(payment_preimage);
 	check_added_monitors!(nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
@@ -1130,8 +1146,8 @@ fn test_monitor_update_fail_reestablish() {
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
 	let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
 	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1146,11 +1162,11 @@ fn test_monitor_update_fail_reestablish() {
 	nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
 	check_added_monitors!(nodes[1], 1);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
 	assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
 	assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
@@ -1204,9 +1220,11 @@ fn raa_no_response_awaiting_raa_state() {
 	// requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
 	// generation during RAA while in monitor-update-failed state.
 	{
-		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
-		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[0], 0);
 	}
@@ -1255,7 +1273,8 @@ fn raa_no_response_awaiting_raa_state() {
 	// chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
 	// commitment transaction states) whereas here we can explicitly check for it.
 	{
-		nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_3,
+			RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
 		check_added_monitors!(nodes[0], 0);
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	}
@@ -1315,15 +1334,15 @@ fn claim_while_disconnected_monitor_update_fail() {
 	// Forward a payment for B to claim
 	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	nodes[1].node.claim_funds(payment_preimage_1);
 	check_added_monitors!(nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
 	let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
 	let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1344,7 +1363,8 @@ fn claim_while_disconnected_monitor_update_fail() {
 	// the monitor still failed
 	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1426,9 +1446,11 @@ fn monitor_failed_no_reestablish_response() {
 	{
 		let mut node_0_per_peer_lock; let mut node_0_peer_state_lock;
+		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+	}
+	{
 		let mut node_1_per_peer_lock; let mut node_1_peer_state_lock;
-		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}
@@ -1436,7 +1458,8 @@ fn monitor_failed_no_reestablish_response() {
 	// on receipt).
 	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1451,11 +1474,11 @@ fn monitor_failed_no_reestablish_response() {
 	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
 	// is still failing to update monitors.
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
 	let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
 	let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1508,7 +1531,8 @@ fn first_message_on_recv_ordering() {
 	// can deliver it and fail the monitor update.
 	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1531,7 +1555,8 @@ fn first_message_on_recv_ordering() {
 	// Route the second payment, generating an update_add_htlc/commitment_signed
 	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -1602,7 +1627,6 @@ fn test_monitor_update_fail_claim() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 	nodes[1].node.claim_funds(payment_preimage_1);
-	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
@@ -1613,7 +1637,8 @@ fn test_monitor_update_fail_claim() {
 	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
 	{
-		nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[2].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[2], 1);
 	}
@@ -1628,9 +1653,11 @@ fn test_monitor_update_fail_claim() {
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 0);
 	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+	expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 
 	let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
-	nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
+	nodes[2].node.send_payment_with_route(&route, payment_hash_3,
+		RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
 	check_added_monitors!(nodes[2], 1);
 
 	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
@@ -1645,6 +1672,7 @@ fn test_monitor_update_fail_claim() {
 	let channel_id = chan_1.2;
 	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 	check_added_monitors!(nodes[1], 0);
 
 	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1653,7 +1681,7 @@ fn test_monitor_update_fail_claim() {
 	expect_payment_sent!(nodes[0], payment_preimage_1);
 
 	// Get the payment forwards, note that they were batched into one commitment update.
-	expect_pending_htlcs_forwardable!(nodes[1]);
+	nodes[1].node.process_pending_htlc_forwards();
 	check_added_monitors!(nodes[1], 1);
 	let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
@@ -1726,7 +1754,8 @@ fn test_monitor_update_on_pending_forwards() {
 	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
 	{
-		nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[2].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[2], 1);
 	}
@@ -1739,7 +1768,6 @@ fn test_monitor_update_on_pending_forwards() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
 	check_added_monitors!(nodes[1], 1);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1752,12 +1780,18 @@ fn test_monitor_update_on_pending_forwards() {
 	commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
 
 	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 2);
-	if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[0] {
+	assert_eq!(events.len(), 3);
+	if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
 		assert_eq!(payment_hash, payment_hash_1);
 		assert!(payment_failed_permanently);
 	} else { panic!("Unexpected event!"); }
-	match events[1] {
+	match events[2] {
+		Event::PaymentFailed { payment_hash, .. } => {
+			assert_eq!(payment_hash, payment_hash_1);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	match events[0] {
 		Event::PendingHTLCsForwardable { .. } => { },
 		_ => panic!("Unexpected event"),
 	};
@@ -1785,7 +1819,8 @@ fn monitor_update_claim_fail_no_response() {
 	// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
 	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	{
-		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1797,7 +1832,6 @@ fn monitor_update_claim_fail_no_response() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.claim_funds(payment_preimage_1);
-	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 	check_added_monitors!(nodes[1], 1);
 
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1805,6 +1839,7 @@ fn monitor_update_claim_fail_no_response() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 	check_added_monitors!(nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1855,6 +1890,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 	check_added_monitors!(nodes[0], 0);
+	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
 
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 0);
@@ -1873,9 +1909,12 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	}
 
 	// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	// But we want to re-emit ChannelPending
+	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1976,7 +2015,9 @@ fn test_path_paused_mpp() {
 	// Now check that we get the right return value, indicating that the first path succeeded but
 	// the second got a MonitorUpdateInProgress err. This implies
 	// PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
-	if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)) {
+	if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
+		&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
+	) {
 		assert_eq!(results.len(), 2);
 		if let Ok(()) = results[0] {} else { panic!(); }
 		if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
@@ -2021,7 +2062,8 @@ fn test_pending_update_fee_ack_on_reconnect() {
 	send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
 
 	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
-	nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	nodes[1].node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	// bs_initial_send_msgs are not delivered until they are re-generated after reconnect
@@ -2041,12 +2083,12 @@ fn test_pending_update_fee_ack_on_reconnect() {
 	let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 	// bs_first_raa is not delivered until it is re-generated after reconnect
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2169,12 +2211,12 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	}
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2272,19 +2314,20 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	// (c) will not be freed from the holding cell.
 	let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
 
-	nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+		RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let send = SendEvent::from_node(&nodes[0]);
 	assert_eq!(send.msgs.len(), 1);
 
-	nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 	check_added_monitors!(nodes[0], 0);
 
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[0].node.claim_funds(payment_preimage_0);
 	check_added_monitors!(nodes[0], 1);
-	expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
@@ -2303,15 +2346,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 		reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 	} else {
-		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	}
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// Now reconnect the two
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
@@ -2347,6 +2390,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
 	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
+	expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
 	// New outbound messages should be generated immediately upon a call to
 	// get_and_clear_pending_msg_events (but not before).
@@ -2438,7 +2482,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 	// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
 	// awaiting a remote revoke_and_ack from nodes[0].
 	let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
-	nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, second_payment_hash,
+		RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
@@ -2493,8 +2538,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	}
 
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	if second_fails {
 		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
@@ -2600,7 +2645,15 @@ fn test_permanent_error_during_sending_shutdown() {
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
 	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
-	check_closed_broadcast!(nodes[0], true);
+
+	// We always send the `shutdown` response when initiating a shutdown, even if we immediately
+	// close the channel thereafter.
+	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 3);
+	if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+	if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
 	check_added_monitors!(nodes[0], 2);
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
@@ -2623,7 +2676,15 @@ fn test_permanent_error_during_handling_shutdown() {
 	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
 	let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
-	check_closed_broadcast!(nodes[1], true);
+
+	// We always send the `shutdown` response when receiving a shutdown, even if we immediately
+	// close the channel thereafter.
+	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 3);
+	if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+	if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
 	check_added_monitors!(nodes[1], 2);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
@@ -2645,7 +2706,6 @@ fn double_temp_error() {
 	// `claim_funds` results in a ChannelMonitorUpdate.
 	nodes[1].node.claim_funds(payment_preimage_1);
 	check_added_monitors!(nodes[1], 1);
-	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 
 	let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
@@ -2653,7 +2713,6 @@ fn double_temp_error() {
 	// which had some asserts that prevented it from being called twice.
 	nodes[1].node.claim_funds(payment_preimage_2);
 	check_added_monitors!(nodes[1], 1);
-	expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -2662,11 +2721,24 @@ fn double_temp_error() {
 	check_added_monitors!(nodes[1], 0);
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
 
-	// Complete the first HTLC.
-	let events = nodes[1].node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 1);
+	// Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
+	// and get both PaymentClaimed events at once.
+	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+
+	let events = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	match events[0] {
+		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
+		_ => panic!("Unexpected Event: {:?}", events[0]),
+	}
+	match events[1] {
+		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
+		_ => panic!("Unexpected Event: {:?}", events[1]),
+	}
+
+	assert_eq!(msg_events.len(), 1);
 	let (update_fulfill_1, commitment_signed_b1, node_id) = {
-		match &events[0] {
+		match &msg_events[0] {
 			&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
 				assert!(update_add_htlcs.is_empty());
 				assert_eq!(update_fulfill_htlcs.len(), 1);
@@ -2772,6 +2844,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
 	check_added_monitors!(nodes[1], 1);
+	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
 	let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
@@ -2870,6 +2943,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
 	check_added_monitors!(nodes[0], 1);
+	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
 
 	let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 
 	if lock_commitment {