use util::scid_utils;
use util::test_utils;
use util::test_utils::{panicking, TestChainMonitor};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
use util::errors::APIError;
use util::config::UserConfig;
use util::ser::{ReadableArgs, Writeable};
use alloc::rc::Rc;
use sync::{Arc, Mutex};
use core::mem;
+use core::iter::repeat;
pub const CHAN_CONFIRM_DEPTH: u32 = 10;
}
}
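+ // Build the replacement broadcaster once up front; it is borrowed both by the
+ // reloaded ChannelManager below and by the replacement chain monitor.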
+ let broadcaster = test_utils::TestBroadcaster {
+ txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
+ blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
+ };
+
// Before using all the new monitors to check the watch outpoints, use the full set of
// them to ensure we can write and reload our ChannelManager.
{
keys_manager: self.keys_manager,
fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
chain_monitor: self.chain_monitor,
- tx_broadcaster: &test_utils::TestBroadcaster {
- txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
- blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
- },
+ tx_broadcaster: &broadcaster,
logger: &self.logger,
channel_monitors,
}).unwrap();
}
let persister = test_utils::TestPersister::new();
- let broadcaster = test_utils::TestBroadcaster {
- txn_broadcasted: Mutex::new(self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone()),
- blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())),
- };
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
for deserialized_monitor in deserialized_monitors.drain(..) {
{
commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
if $fail_backwards {
- $crate::expect_pending_htlcs_forwardable!($node_a);
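+ // The HTLC being failed back is one $node_a tried to forward to $node_b, so
+ // the failed destination is the next-hop channel between them.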
+ $crate::expect_pending_htlcs_forwardable_and_htlc_handling_failed!($node_a, vec![$crate::util::events::HTLCDestination::NextHopChannel { node_id: Some($node_b.node.get_our_node_id()), channel_id: $commitment_signed.channel_id }]);
check_added_monitors!($node_a, 1);
let channel_state = $node_a.node.channel_state.lock().unwrap();
}
#[macro_export]
-/// Clears (and ignores) a PendingHTLCsForwardable event
-macro_rules! expect_pending_htlcs_forwardable_ignore {
- ($node: expr) => {{
+/// Clears (and ignores) a PendingHTLCsForwardable event, asserting that any
+/// accompanying HTLCHandlingFailed events match the expected failure destinations.
+macro_rules! expect_pending_htlcs_forwardable_conditions {
+ ($node: expr, $expected_failures: expr) => {{
+ let expected_failures = $expected_failures;
let events = $node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
match events[0] {
$crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
+
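+ // Expect exactly one PendingHTLCsForwardable event plus one HTLCHandlingFailed
+ // event per expected failure.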
+ let count = expected_failures.len() + 1;
+ assert_eq!(events.len(), count);
+
+ if !expected_failures.is_empty() {
+ $crate::expect_htlc_handling_failed_destinations!(events, expected_failures);
+ }
}}
}
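+// A minimal usage sketch (hypothetical `nodes` index and `payment_hash`, assuming
+// the usual functional-test setup): assert on the pending events without yet
+// processing the forwards:
+//
+//   expect_pending_htlcs_forwardable_conditions!(nodes[1],
+//       vec![HTLCDestination::FailedPayment { payment_hash }]);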
+#[macro_export]
+/// Checks that each HTLCHandlingFailed event in $events names a destination contained
+/// in $expected_failures, skipping any interleaved PendingHTLCsForwardable event.
+macro_rules! expect_htlc_handling_failed_destinations {
+ ($events: expr, $expected_failures: expr) => {{
+ for event in $events {
+ match event {
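+ // Callers pass the full pending-event list, so tolerate the single
+ // PendingHTLCsForwardable event interleaved with the failures.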
+ $crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
+ $crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => {
+ assert!($expected_failures.contains(&failed_next_destination))
+ },
+ _ => panic!("Unexpected destination"),
+ }
+ }
+ }}
+}
+
+#[macro_export]
+/// Clears (and ignores) a PendingHTLCsForwardable event
+macro_rules! expect_pending_htlcs_forwardable_ignore {
+ ($node: expr) => {{
+ $crate::expect_pending_htlcs_forwardable_conditions!($node, vec![]);
+ }};
+}
+
+#[macro_export]
+/// Clears (and ignores) PendingHTLCsForwardable and HTLCHandlingFailed events
+macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore {
+ ($node: expr, $expected_failures: expr) => {{
+ $crate::expect_pending_htlcs_forwardable_conditions!($node, $expected_failures);
+ }};
+}
+
#[macro_export]
/// Handles a PendingHTLCsForwardable event
macro_rules! expect_pending_htlcs_forwardable {
($node: expr) => {{
$crate::expect_pending_htlcs_forwardable_ignore!($node);
+ $node.node.process_pending_htlc_forwards();
+
+ // Ensure process_pending_htlc_forwards is idempotent.
+ $node.node.process_pending_htlc_forwards();
+ }};
+}
+
+#[macro_export]
+/// Handles PendingHTLCsForwardable and HTLCHandlingFailed events
+macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed {
+ ($node: expr, $expected_failures: expr) => {{
+ $crate::expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $expected_failures);
+ $node.node.process_pending_htlc_forwards();
+
+ // Ensure process_pending_htlc_forwards is idempotent.
+ $node.node.process_pending_htlc_forwards();
+ }};
+}
let mut events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
let expected_payment_id = match events.pop().unwrap() {
- Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update,
+ Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update, short_channel_id,
#[cfg(test)]
error_code,
#[cfg(test)]
assert!(retry.is_some(), "expected retry.is_some()");
assert_eq!(retry.as_ref().unwrap().final_value_msat, path.last().unwrap().fee_msat, "Retry amount should match last hop in path");
assert_eq!(retry.as_ref().unwrap().payment_params.payee_pubkey, path.last().unwrap().pubkey, "Retry payee node_id should match last hop in path");
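+ // If the failure identified a specific channel, the retry parameters should
+ // already list it as previously failed so rerouting can avoid it.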
+ if let Some(scid) = short_channel_id {
+ assert!(retry.as_ref().unwrap().payment_params.previously_failed_channels.contains(&scid));
+ }
#[cfg(test)]
{
($node: expr, $prev_node: expr, $next_node: expr, $new_msgs: expr) => {
{
$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
- let fee = $node.node.channel_state.lock().unwrap()
- .by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap()
- .config.options.forwarding_fee_base_msat;
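+ // Prefer the previous config, if one is still pending: HTLCs forwarded before a
+ // config update may have been charged the old forwarding fee.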
+ let fee = {
+ let channel_state = $node.node.channel_state.lock().unwrap();
+ let channel = channel_state
+ .by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
+ if let Some(prev_config) = channel.prev_config() {
+ prev_config.forwarding_fee_base_msat
+ } else {
+ channel.config().forwarding_fee_base_msat
+ }
+ };
expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false);
expected_total_fee_msat += fee as u64;
check_added_monitors!($node, 1);
assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
}
expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
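+ // The recipient fails the HTLC(s) once per path, so expect one FailedPayment
+ // destination (all sharing the same payment hash) per path.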
+ let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect();
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations);
pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash);
}
node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node);
if !update_next_node {
- expect_pending_htlcs_forwardable!(node);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]);
}
}
let events = node.node.get_and_clear_pending_msg_events();