X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchanmon_update_fail_tests.rs;h=30088f2eba6815256990d634afa9e7ec1f2c5d53;hb=843d25d750c3408d3f8f917764b8a58019a9dd81;hp=dd7b1906ca079aced33d3a62384fc1772d02b49c;hpb=0c57018f2fb5618f976542a4d24adee29cf49c96;p=rust-lightning

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index dd7b1906..30088f2e 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -28,7 +28,7 @@ use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
 use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
 use util::errors::APIError;
 use util::ser::{ReadableArgs, Writeable};
 use util::test_utils::TestBroadcaster;
@@ -40,8 +40,9 @@ use ln::functional_test_utils::*;
 use util::test_utils;
 
+use io;
 use prelude::*;
-use std::sync::{Arc, Mutex};
+use sync::{Arc, Mutex};
 
 // If persister_fail is true, we have the persister return a PermanentFailure
 // instead of the higher-level ChainMonitor.
@@ -61,7 +62,7 @@ fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
 		false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure))
 	}
 	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {});
 	check_added_monitors!(nodes[0], 2);
 
@@ -77,9 +78,10 @@ fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
 	};
 
 	// TODO: Once we hit the chain with the failure transaction we should check that we get a
-	// PaymentFailed event
+	// PaymentPathFailed event
 
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
 
 #[test]
@@ -122,7 +124,7 @@ fn test_monitor_and_persister_update_fail() {
 		let mut w = test_utils::TestVecWriter(Vec::new());
 		monitor.write(&mut w).unwrap();
 		let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
-			&mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+			&mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
 		assert!(new_monitor == *monitor);
 		let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 		assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
@@ -141,7 +143,7 @@ fn test_monitor_and_persister_update_fail() {
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 	if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
-		if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
+		if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 			// Check that even though the persister is returning a TemporaryFailure,
 			// because the update is bogus, ultimately the error that's returned
 			// should be a PermanentFailure.
@@ -185,7 +187,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {});
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -197,7 +199,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
 	match persister_fail {
@@ -220,11 +222,16 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 	let events_3 = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events_3.len(), 1);
 	match events_3[0] {
-		Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+		Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
 			assert_eq!(payment_hash_1, *payment_hash);
-			assert!(payment_preimage.is_none());
-			assert_eq!(payment_secret_1, *payment_secret);
 			assert_eq!(amt, 1000000);
+			match &purpose {
+				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+					assert!(payment_preimage.is_none());
+					assert_eq!(payment_secret_1, *payment_secret);
+				},
+				_ => panic!("expected PaymentPurpose::InvoicePayment")
+			}
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -239,7 +246,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 			false => *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure))
 		}
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -251,7 +258,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
 	// ...and make sure we can force-close a frozen channel
@@ -260,9 +267,10 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 	check_closed_broadcast!(nodes[0], true);
 
 	// TODO: Once we hit the chain with the failure transaction we should check that we get a
-	// PaymentFailed event
+	// PaymentPathFailed event
 
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
+	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 }
 
 #[test]
@@ -302,14 +310,14 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 	let logger = test_utils::TestLogger::new();
 
-	let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
 	// Now try to send a second payment which will fail to send
 	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
 	{
 		*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -338,8 +346,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	let events_3 = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events_3.len(), 1);
 	match events_3[0] {
-		Event::PaymentSent { ref payment_preimage } => {
+		Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
 			assert_eq!(*payment_preimage, payment_preimage_1);
+			assert_eq!(*payment_hash, payment_hash_1);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -430,8 +439,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	let events_3 = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events_3.len(), 1);
 	match events_3[0] {
-		Event::PaymentSent { ref payment_preimage } => {
+		Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
 			assert_eq!(*payment_preimage, payment_preimage_1);
+			assert_eq!(*payment_hash, payment_hash_1);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -589,11 +599,16 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	let events_5 = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events_5.len(), 1);
 	match events_5[0] {
-		Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+		Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
 			assert_eq!(payment_hash_2, *payment_hash);
-			assert!(payment_preimage.is_none());
-			assert_eq!(payment_secret_2, *payment_secret);
 			assert_eq!(amt, 1000000);
+			match &purpose {
+				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+					assert!(payment_preimage.is_none());
+					assert_eq!(payment_secret_2, *payment_secret);
+				},
+				_ => panic!("expected PaymentPurpose::InvoicePayment")
+			}
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -641,7 +656,7 @@ fn test_monitor_update_fail_cs() {
 	let (payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -704,11 +719,16 @@ fn test_monitor_update_fail_cs() {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
-		Event::PaymentReceived { payment_hash, payment_preimage, payment_secret, amt, user_payment_id: _ } => {
+		Event::PaymentReceived { payment_hash, ref purpose, amt } => {
 			assert_eq!(payment_hash, our_payment_hash);
-			assert!(payment_preimage.is_none());
-			assert_eq!(our_payment_secret, payment_secret);
 			assert_eq!(amt, 1000000);
+			match &purpose {
+				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+					assert!(payment_preimage.is_none());
+					assert_eq!(our_payment_secret, *payment_secret);
+				},
+				_ => panic!("expected PaymentPurpose::InvoicePayment")
+			}
 		},
 		_ => panic!("Unexpected event"),
 	};
@@ -731,7 +751,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
 	let (payment_preimage_1, our_payment_hash, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, our_payment_hash, &Some(payment_secret_1)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -782,7 +802,7 @@ fn test_monitor_update_raa_while_paused() {
 	let (payment_preimage_1, our_payment_hash_1, our_payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -791,7 +811,7 @@ fn test_monitor_update_raa_while_paused() {
 	let (payment_preimage_2, our_payment_hash_2, our_payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
 	{
 		let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
-		let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[1].node.send_payment(&route, our_payment_hash_2, &Some(our_payment_secret_2)).unwrap();
 		check_added_monitors!(nodes[1], 1);
 	}
@@ -883,7 +903,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[2]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -910,7 +930,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -931,7 +951,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// Try to route another payment backwards from 2 to make sure 1 holds off on responding
 	let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]);
 	let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
-	let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+	let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 	nodes[2].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4)).unwrap();
 	check_added_monitors!(nodes[2], 1);
 
@@ -1144,6 +1164,7 @@ fn test_monitor_update_fail_reestablish() {
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -1158,7 +1179,10 @@ fn test_monitor_update_fail_reestablish() {
 
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert_eq!(
+		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
+
 	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 
@@ -1172,10 +1196,15 @@ fn test_monitor_update_fail_reestablish() {
 	assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
 
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+	assert_eq!(
+		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
 	check_added_monitors!(nodes[1], 0);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert_eq!(
+		get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
+			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
 	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
 	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1223,7 +1252,7 @@ fn raa_no_response_awaiting_raa_state() {
 	// generation during RAA while in monitor-update-failed state.
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
@@ -1277,7 +1306,7 @@ fn raa_no_response_awaiting_raa_state() {
 	// commitment transaction states) whereas here we can explicitly check for it.
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
 		check_added_monitors!(nodes[0], 0);
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -1337,7 +1366,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 	let logger = test_utils::TestLogger::new();
 
 	// Forward a payment for B to claim
-	let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -1352,14 +1381,14 @@ fn claim_while_disconnected_monitor_update_fail() {
 	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
-	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
 	// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
 	// update.
 	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1369,7 +1398,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1438,8 +1467,9 @@ fn claim_while_disconnected_monitor_update_fail() {
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
-		Event::PaymentSent { ref payment_preimage } => {
+		Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
 			assert_eq!(*payment_preimage, payment_preimage_1);
+			assert_eq!(*payment_hash, payment_hash_1);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -1465,7 +1495,7 @@ fn monitor_failed_no_reestablish_response() {
 	let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1492,7 +1522,9 @@ fn monitor_failed_no_reestablish_response() {
 	let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
+	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
+	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
 	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
 	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -1539,7 +1571,7 @@ fn first_message_on_recv_ordering() {
 	let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1564,7 +1596,7 @@ fn first_message_on_recv_ordering() {
 	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1651,7 +1683,7 @@ fn test_monitor_update_fail_claim() {
 	let route;
 	{
 		let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
-		route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
+		route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
 		check_added_monitors!(nodes[2], 1);
 	}
@@ -1702,20 +1734,30 @@ fn test_monitor_update_fail_claim() {
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	match events[0] {
-		Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+		Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
 			assert_eq!(payment_hash_2, *payment_hash);
-			assert!(payment_preimage.is_none());
-			assert_eq!(payment_secret_2, *payment_secret);
 			assert_eq!(1_000_000, amt);
+			match &purpose {
+				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+					assert!(payment_preimage.is_none());
+					assert_eq!(payment_secret_2, *payment_secret);
+				},
+				_ => panic!("expected PaymentPurpose::InvoicePayment")
+			}
 		},
 		_ => panic!("Unexpected event"),
 	}
 	match events[1] {
-		Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+		Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
 			assert_eq!(payment_hash_3, *payment_hash);
-			assert!(payment_preimage.is_none());
-			assert_eq!(payment_secret_3, *payment_secret);
 			assert_eq!(1_000_000, amt);
+			match &purpose {
+				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+					assert!(payment_preimage.is_none());
+					assert_eq!(payment_secret_3, *payment_secret);
+				},
+				_ => panic!("expected PaymentPurpose::InvoicePayment")
+			}
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -1751,7 +1793,7 @@ fn test_monitor_update_on_pending_forwards() {
 	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
 	{
 		let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
-		let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
 		check_added_monitors!(nodes[2], 1);
 	}
@@ -1780,7 +1822,7 @@ fn test_monitor_update_on_pending_forwards() {
 
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
-	if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+	if let Event::PaymentPathFailed { payment_hash, rejected_by_dest, .. } = events[0] {
 		assert_eq!(payment_hash, payment_hash_1);
 		assert!(rejected_by_dest);
 	} else { panic!("Unexpected event!"); }
@@ -1808,13 +1850,13 @@ fn monitor_update_claim_fail_no_response() {
 	let logger = test_utils::TestLogger::new();
 
 	// Forward a payment for B to claim
-	let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
 	// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
 	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
 	{
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 	}
@@ -1850,8 +1892,9 @@ fn monitor_update_claim_fail_no_response() {
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
-		Event::PaymentSent { ref payment_preimage } => {
+		Event::PaymentSent { ref payment_preimage, ref payment_hash } => {
 			assert_eq!(*payment_preimage, payment_preimage_1);
+			assert_eq!(*payment_hash, payment_hash_1);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -1912,7 +1955,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	// Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1948,6 +1991,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
 	send_payment(&nodes[0], &[&nodes[1]], 8000000);
 	close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -1973,7 +2018,7 @@ fn test_path_paused_mpp() {
 	let logger = test_utils::TestLogger::new();
 
 	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
-	let mut route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+	let mut route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
 
 	// Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
 	let path = route.paths[0].clone();
@@ -2004,7 +2049,7 @@ fn test_path_paused_mpp() {
 	// Pass the first HTLC of the payment along to nodes[3].
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
-	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), payment_secret, events.pop().unwrap(), false);
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
 
 	// And check that, after we successfully update the monitor for chan_2 we can pass the second
 	// HTLC along to nodes[3] and claim the whole payment back to nodes[0].
@@ -2012,11 +2057,200 @@ fn test_path_paused_mpp() {
 	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
-	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), payment_secret, events.pop().unwrap(), true);
+	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
 
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
 }
+
+#[test]
+fn test_pending_update_fee_ack_on_reconnect() {
+	// In early versions of our automated fee update patch, nodes did not correctly use the
+	// previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
+	// undelivered commitment_signed.
+	//
+	// B sends A new HTLC + CS, not delivered
+	// A sends B update_fee + CS
+	// B receives the CS and sends RAA, previously causing B to lock in the new feerate
+	// reconnect
+	// B resends initial CS, using the original fee
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
+
+	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
+	let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph,
+		&nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+	nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	// bs_initial_send_msgs are not delivered until they are re-generated after reconnect
+
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock *= 2;
+	}
+	nodes[0].node.timer_tick_occurred();
+	check_added_monitors!(nodes[0], 1);
+	let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	assert!(as_update_fee_msgs.update_fee.is_some());
+
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
+	check_added_monitors!(nodes[1], 1);
+	let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	// bs_first_raa is not delivered until it is re-generated after reconnect
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+	let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+	let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
+	let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(bs_resend_msgs.len(), 3);
+	if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
+		assert_eq!(*updates, bs_initial_send_msgs);
+	} else { panic!(); }
+	if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
+		assert_eq!(*msg, bs_first_raa);
+	} else { panic!(); }
+	if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
+
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
+	get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+
+	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
+	check_added_monitors!(nodes[0], 1);
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+	check_added_monitors!(nodes[1], 1);
+	let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+	check_added_monitors!(nodes[0], 1);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
+	check_added_monitors!(nodes[1], 1);
+	let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
+	check_added_monitors!(nodes[0], 1);
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+	check_added_monitors!(nodes[1], 1);
+
+	expect_pending_htlcs_forwardable!(nodes[0]);
+	expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000);
+
+	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
+}
+
+fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
+	// In early versions we did not handle resending of update_fee on reconnect correctly. The
+	// chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
+	// explicitly here.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	send_payment(&nodes[0], &[&nodes[1]], 1000);
+
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock += 20;
+	}
+	nodes[0].node.timer_tick_occurred();
+	check_added_monitors!(nodes[0], 1);
+	let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	assert!(update_msgs.update_fee.is_some());
+	if deliver_update {
+		nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
+	}
+
+	if parallel_updates {
+		{
+			let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+			*feerate_lock += 20;
+		}
+		nodes[0].node.timer_tick_occurred();
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+	let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+	let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
+	get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
+	let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(as_reconnect_msgs.len(), 2);
+	if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
+	let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
+		{ updates } else { panic!(); };
+	assert!(update_msgs.update_fee.is_some());
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
+	if parallel_updates {
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
+		check_added_monitors!(nodes[1], 1);
+		let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+		check_added_monitors!(nodes[0], 1);
+		let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
+		check_added_monitors!(nodes[0], 1);
+		let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+		let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		check_added_monitors!(nodes[1], 1);
+
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+		check_added_monitors!(nodes[0], 1);
+
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+		check_added_monitors!(nodes[1], 1);
+	} else {
+		commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
+	}
+
+	send_payment(&nodes[0], &[&nodes[1]], 1000);
+}
+#[test]
+fn update_fee_resend_test() {
+	do_update_fee_resend_test(false, false);
+	do_update_fee_resend_test(true, false);
+	do_update_fee_resend_test(false, true);
+	do_update_fee_resend_test(true, true);
+}
+
 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	// Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
 	// properly free them on reconnect. We previously failed such HTLCs upon serialization, but
@@ -2057,7 +2291,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 
 	let route = {
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap()
+		get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap()
 	};
 
 	nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
@@ -2215,3 +2449,316 @@ fn channel_holding_cell_serialize() {
 	do_channel_holding_cell_serialize(true, false);
 	do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
 }
+
+#[derive(PartialEq)]
+enum HTLCStatusAtDupClaim {
+	Received,
+	HoldingCell,
+	Cleared,
+}
+fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
+	// When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
+	// along the payment path before waiting for a full commitment_signed dance. This is great, but
+	// can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
+	// reconnects, and then has to re-send its update_fulfill_htlc message again.
+	// In previous code, we didn't handle the double-claim correctly, spuriously closing the
+	// channel on which the inbound HTLC was received.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+	let mut as_raa = None;
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
+		// awaiting a remote revoke_and_ack from nodes[0].
+		let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph,
+			&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+		nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+		check_added_monitors!(nodes[1], 1);
+
+		let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+		check_added_monitors!(nodes[0], 1);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+		check_added_monitors!(nodes[0], 1);
+
+		as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+	}
+
+	let fulfill_msg = msgs::UpdateFulfillHTLC {
+		channel_id: chan_2,
+		htlc_id: 0,
+		payment_preimage,
+	};
+	if second_fails {
+		assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
+		expect_pending_htlcs_forwardable!(nodes[2]);
+		check_added_monitors!(nodes[2], 1);
+		get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	} else {
+		assert!(nodes[2].node.claim_funds(payment_preimage));
+		check_added_monitors!(nodes[2], 1);
+		let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+		assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
+		// Check that the message we're about to deliver matches the one generated:
+		assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
+	}
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
+	check_added_monitors!(nodes[1], 1);
+
+	let mut bs_updates = None;
+	if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], payment_preimage);
+		if htlc_status == HTLCStatusAtDupClaim::Cleared {
+			commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+		}
+	} else {
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+	if second_fails {
+		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+		expect_pending_htlcs_forwardable!(nodes[1]);
+	} else {
+		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	}
+
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
+		check_added_monitors!(nodes[1], 1);
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+	if htlc_status != HTLCStatusAtDupClaim::Cleared {
+		commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+	}
+}
+
+#[test]
+fn test_reconnect_dup_htlc_claims() {
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
+}
+
+#[test]
+fn test_temporary_error_during_shutdown() {
+	// Test that temporary failures when updating the monitor's shutdown script delay cooperative
+	// close.
+	let mut config = test_default_channel_config();
+	config.channel_options.commit_upfront_shutdown_pubkey = false;
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+	nodes[0].node.close_channel(&channel_id).unwrap();
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
+	check_added_monitors!(nodes[0], 1);
+
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
+	*nodes[1].chain_monitor.update_ret.lock().unwrap() = None;
+
+	let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
+
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	*nodes[1].chain_monitor.update_ret.lock().unwrap() = None;
+	let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+	nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+
+	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
+	let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+	let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
+	let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+	assert!(none_b.is_none());
+	let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+	assert_eq!(txn_a, txn_b);
+	assert_eq!(txn_a.len(), 1);
+	check_spends!(txn_a[0], funding_tx);
+	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+}
+
+#[test]
+fn test_permanent_error_during_sending_shutdown() {
+	// Test that permanent failures when updating the monitor's shutdown script result in a force
+	// close when initiating a cooperative close.
+	let mut config = test_default_channel_config();
+	config.channel_options.commit_upfront_shutdown_pubkey = false;
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));
+
+	assert!(nodes[0].node.close_channel(&channel_id).is_ok());
+	check_closed_broadcast!(nodes[0], true);
+	check_added_monitors!(nodes[0], 2);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+}
+
+#[test]
+fn test_permanent_error_during_handling_shutdown() {
+	// Test that permanent failures when updating the monitor's shutdown script result in a force
+	// close when handling a cooperative close.
+	let mut config = test_default_channel_config();
+	config.channel_options.commit_upfront_shutdown_pubkey = false;
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::PermanentFailure));
+
+	assert!(nodes[0].node.close_channel(&channel_id).is_ok());
+	let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &shutdown);
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 2);
+	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+}
+
+#[test]
+fn double_temp_error() {
+	// Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+	let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+	let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+	// `claim_funds` results in a ChannelMonitorUpdate.
+	assert!(nodes[1].node.claim_funds(payment_preimage_1));
+	check_added_monitors!(nodes[1], 1);
+	let (funding_tx, latest_update_1) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+
+	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+	// Previously, this would have panicked due to a double-call to `Channel::monitor_update_failed`,
+	// which contained asserts preventing it from being called twice.
+	assert!(nodes[1].node.claim_funds(payment_preimage_2));
+	check_added_monitors!(nodes[1], 1);
+	*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
+
+	let (_, latest_update_2) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+	nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 0);
+	nodes[1].node.channel_monitor_updated(&funding_tx, latest_update_2);
+
+	// Complete the first HTLC.
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_fulfill_1, commitment_signed_b1, node_id) = {
+		match &events[0] {
+			&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+				(update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
+			},
+			_ => panic!("Unexpected event"),
+		}
+	};
+	assert_eq!(node_id, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
+	check_added_monitors!(nodes[0], 0);
+	expect_payment_sent!(nodes[0], payment_preimage_1);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
+	check_added_monitors!(nodes[0], 1);
+	nodes[0].node.process_pending_htlc_forwards();
+	let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[1], 0);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
+	check_added_monitors!(nodes[1], 1);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
+	check_added_monitors!(nodes[1], 1);
+
+	// Complete the second HTLC.
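+	// Unlike the first claim, this fulfill arrives paired with a SendRevokeAndACK, since
+	// nodes[1] still owes nodes[0] an RAA for the commitment_signed it handled above.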
+	let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 2);
+		(match &events[0] {
+			MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				assert!(updates.update_add_htlcs.is_empty());
+				assert!(updates.update_fail_htlcs.is_empty());
+				assert!(updates.update_fail_malformed_htlcs.is_empty());
+				assert!(updates.update_fee.is_none());
+				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+				(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+			},
+			_ => panic!("Unexpected event"),
+		},
+		match events[1] {
+			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				(*msg).clone()
+			},
+			_ => panic!("Unexpected event"),
+		})
+	};
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
+	check_added_monitors!(nodes[0], 0);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
+	expect_payment_sent!(nodes[0], payment_preimage_2);
+}