In `ChannelManager::fail_htlc_backwards_internal`, we now push an `HTLCHandlingFailed` event containing the channel the HTLC arrived on (`prev_channel_id`) and the destination it failed to reach (`failed_next_destination`).
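For reference, the event and destination enum constructed throughout this diff have the following shape (a sketch of the `util::events` additions, matching the variants used below rather than quoting the upstream definitions verbatim):

/// Where we tried to send an HTLC when its handling failed.
pub enum HTLCDestination {
	/// We tried forwarding over the channel to the given counterparty, but failed.
	NextHopChannel { node_id: Option<PublicKey>, channel_id: [u8; 32] },
	/// The requested short channel id did not match any channel we know about.
	UnknownNextHop { requested_forward_scid: u64 },
	/// A payment to us failed (e.g. a bad payment secret or inconsistent total_msat).
	FailedPayment { payment_hash: PaymentHash },
}

// ...and the new Event variant pushed from fail_htlc_backwards_internal:
pub enum Event {
	// ...
	HTLCHandlingFailed {
		/// The channel the failed HTLC arrived on (the previous hop's outpoint.to_channel_id()).
		prev_channel_id: [u8; 32],
		/// Where we were trying to send the HTLC.
		failed_next_destination: HTLCDestination,
	},
}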
self.chain_monitor.update_channel(funding_txo, update)
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
use core::ops::Deref;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use bitcoin::secp256k1::PublicKey;
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
/// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
persister: P,
/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
/// from the user and not from a [`ChannelMonitor`].
- pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
+ pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,
}
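Each queued entry now carries the counterparty's node id alongside the events, so consumers can attribute monitor events to a peer even when the channel is already gone. As a readability aid only (the change itself spells out the bare tuple everywhere), the entry shape is:

// Hypothetical alias for the new queue entry shape; not part of this change.
type PendingMonitorEventEntry = (OutPoint, Vec<MonitorEvent>, Option<PublicKey>);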
log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
Err(ChannelMonitorUpdateErr::PermanentFailure) => {
monitor_state.channel_perm_failed.store(true, Ordering::Release);
- self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)]));
+ self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
},
Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
funding_txo,
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
- }]));
+ }], monitor_data.monitor.get_counterparty_node_id()));
},
MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
/// channel_monitor_updated once with the highest ID.
#[cfg(any(test, fuzzing))]
pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
+ let monitors = self.monitors.read().unwrap();
+ let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
funding_txo,
monitor_update_id,
- }]));
+ }], counterparty_node_id));
}
#[cfg(any(test, fuzzing, feature = "_test_utils"))]
}
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
for monitor_state in self.monitors.read().unwrap().values() {
let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap());
let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
if monitor_events.len() > 0 {
let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
- pending_monitor_events.push((monitor_outpoint, monitor_events));
+ let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
+ pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
}
}
}
self.inner.lock().unwrap().get_cur_holder_commitment_number()
}
+ pub(crate) fn get_counterparty_node_id(&self) -> Option<PublicKey> {
+ self.inner.lock().unwrap().counterparty_node_id
+ }
+
/// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of
/// the Channel was out-of-date. You may use it to get a broadcastable holder toxic tx if we have
/// fallen behind, i.e. when receiving a channel_reestablish with a proof that our counterparty side knows
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::PublicKey;
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
use chain::keysinterface::Sign;
///
/// For details on asynchronous [`ChannelMonitor`] updating and returning
/// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)>;
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
}
/// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use util::config::UserConfig;
use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use util::errors::APIError;
use util::ser::{ReadableArgs, Writeable};
use util::test_utils::TestBroadcaster;
// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
nodes[2].node.fail_htlc_backwards(&payment_hash_1);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
// Rebalance a bit so that we can send backwards from 3 to 1.
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
nodes[2].node.fail_htlc_backwards(&payment_hash_1);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
check_added_monitors!(nodes[2], 1);
let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_closed_broadcast!(nodes[1], true);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
};
if second_fails {
nodes[2].node.fail_htlc_backwards(&payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[2], 1);
get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
} else {
if second_fails {
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
} else {
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
/// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+ pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
// Note that we MUST only generate a monitor update that indicates force-closure - we're
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
// return them to fail the payment.
let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+ let counterparty_node_id = self.get_counterparty_node_id();
for htlc_update in self.holding_cell_htlc_updates.drain(..) {
match htlc_update {
HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
- dropped_outbound_htlcs.push((source, payment_hash));
+ dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
},
_ => {}
}
use ln::wire::Encode;
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
use util::config::{UserConfig, ChannelConfig};
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use util::{byte_utils, events};
use util::scid_utils::fake_scid;
use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
DuplicateClaim,
}
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
};
for htlc_source in failed_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
if let Some((funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
prev_funding_outpoint } => {
+ macro_rules! failure_handler {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+ log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ phantom_shared_secret: $phantom_ss,
+ });
+
+ let reason = if $next_hop_unknown {
+ HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+ } else {
+ HTLCDestination::FailedPayment{ payment_hash }
+ };
+
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+ reason
+ ));
+ continue;
+ }
+ }
macro_rules! fail_forward {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
{
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- phantom_shared_secret: $phantom_ss,
- });
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
- ));
- continue;
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
+ }
+ }
+ }
+ macro_rules! failed_payment {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+ {
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
}
}
}
// `update_fail_malformed_htlc`, meaning here we encrypt the error as
// if it came from us (the second-to-last hop) but contains the sha256
// of the onion.
- fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+ failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
},
Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
- fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+ failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
},
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
- Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
+ Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
_ => panic!(),
}
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code, data }
+ HTLCFailReason::Reason { failure_code, data },
+ HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
));
continue;
},
};
macro_rules! fail_htlc {
- ($htlc: expr) => {
+ ($htlc: expr, $payment_hash: expr) => {
let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(
&byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
phantom_shared_secret,
}), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash: $payment_hash },
));
}
}
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
}
}
if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else if total_value == $payment_data.total_msat {
htlcs.push(claimable_htlc);
new_events.push(events::Event::PaymentReceived {
let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
Ok(payment_preimage) => payment_preimage,
Err(()) => {
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
}
};
},
hash_map::Entry::Occupied(_) => {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
}
}
}
hash_map::Entry::Occupied(inbound_payment) => {
if payment_data.is_none() {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
};
let payment_data = payment_data.unwrap();
if inbound_payment.get().payment_secret != payment_data.payment_secret {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else {
let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
if payment_received_generated {
}
}
- for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
+ for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
}
self.forward_htlcs(&mut phantom_receives);
}
for htlc_source in timed_out_mpp_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+ let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
}
for (err, counterparty_node_id) in handle_errors.drain(..) {
self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash: *payment_hash });
}
}
}
// be surfaced to the user.
fn fail_holding_cell_htlcs(
&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
- _counterparty_node_id: &PublicKey
+ counterparty_node_id: &PublicKey
) {
for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
match htlc_src {
hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
};
let channel_state = self.channel_state.lock().unwrap();
- self.fail_htlc_backwards_internal(channel_state,
- htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
+
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
+ self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
},
HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
let mut session_priv_bytes = [0; 32];
/// to fail and take the channel_state lock for each iteration (as we take ownership and may
/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
/// still-available channels.
- fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+ fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
//identify whether we sent it or not based on the (I presume) very different runtime
//between the branches here. We should make this async and move it into the forward HTLCs
pending_events.push(path_failure);
if let Some(ev) = full_failure_ev { pending_events.push(ev); }
},
- HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
let err_packet = match onion_error {
HTLCFailReason::Reason { failure_code, data } => {
log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
}
}
mem::drop(channel_state_lock);
+ let mut pending_events = self.pending_events.lock().unwrap();
if let Some(time) = forward_event {
- let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(events::Event::PendingHTLCsForwardable {
time_forwardable: time
});
}
+ pending_events.push(events::Event::HTLCHandlingFailed {
+ prev_channel_id: outpoint.to_channel_id(),
+ failed_next_destination: destination
+ });
},
}
}
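Downstream, an application can react to the new event in its event handler. A minimal sketch, where `track_failed_forward` is a hypothetical application callback (not part of this change):

match event {
	Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } => {
		match failed_next_destination {
			// A forward we could not complete; node_id/channel_id identify the outgoing channel.
			HTLCDestination::NextHopChannel { node_id, channel_id } =>
				track_failed_forward(prev_channel_id, node_id, channel_id),
			// An HTLC whose requested forward SCID we did not recognize.
			HTLCDestination::UnknownNextHop { requested_forward_scid } =>
				println!("HTLC for unknown scid {}", requested_forward_scid),
			// An inbound payment we failed ourselves (bad secret, amount, expiry, ...).
			HTLCDestination::FailedPayment { payment_hash } =>
				println!("failed inbound payment {:?}", payment_hash),
		}
	},
	_ => {},
}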
self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
+ HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash });
} else {
match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let chan_restoration_res;
- let (mut pending_failures, finalized_claims) = {
+ let (mut pending_failures, finalized_claims, counterparty_node_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
return;
}
+ let counterparty_node_id = channel.get().get_counterparty_node_id();
let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
// We only send a channel_update in the case where we are just now sending a
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- (updates.failed_htlcs, updates.finalized_claimed_htlcs)
+
+ (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
post_handle_chan_restoration!(self, chan_restoration_res);
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
}
}
}
};
for htlc_source in dropped_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
short_channel_id, channel_outpoint)) =>
{
for failure in pending_failures.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
}
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
self.finalize_claims(finalized_claim_htlcs);
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+ for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
},
MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
failure_code, data,
- }));
+ }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
}
if let Some(channel_ready) = channel_ready_opt {
send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
failure_code: 0x4000 | 15,
data: htlc_msat_height_data
- }));
+ }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
false
} else { true }
});
self.handle_init_event_channel_failures(failed_channels);
- for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+ for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
}
}
};
for htlc_source in failed_htlcs.drain(..) {
- channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+ channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
use ln::msgs::ChannelMessageHandler;
use routing::router::{PaymentParameters, RouteParameters, find_route};
use util::errors::APIError;
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+ use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use util::test_utils;
use chain::keysinterface::KeysInterface;
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ // We have to forward pending HTLCs twice - the first pass attempts to forward the payment (and
+ // fails), the second processes the resulting failure and fails the HTLC backwards.
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
use alloc::rc::Rc;
use sync::{Arc, Mutex};
use core::mem;
+use core::iter::repeat;
pub const CHAN_CONFIRM_DEPTH: u32 = 10;
{
commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
if $fail_backwards {
- $crate::expect_pending_htlcs_forwardable!($node_a);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!($node_a, vec![$crate::util::events::HTLCDestination::NextHopChannel{ node_id: Some($node_b.node.get_our_node_id()), channel_id: $commitment_signed.channel_id }]);
check_added_monitors!($node_a, 1);
let channel_state = $node_a.node.channel_state.lock().unwrap();
}}
}
-pub struct HTLCHandlingFailedConditions {
- pub expected_destinations: Vec<HTLCDestination>,
-}
-
-impl HTLCHandlingFailedConditions {
- pub fn new() -> Self {
- Self {
- expected_destinations: vec![],
- }
- }
-
- pub fn with_reason(mut self, reason: HTLCDestination) -> Self {
- self.expected_destinations = vec![reason];
- self
- }
-
- pub fn with_reasons(mut self, reasons: Vec<HTLCDestination>) -> Self {
- self.expected_destinations = reasons;
- self
- }
-}
-
#[macro_export]
macro_rules! expect_pending_htlcs_forwardable_conditions {
- ($node: expr, $conditions: expr) => {{
- let conditions = $conditions;
+ ($node: expr, $expected_failures: expr) => {{
+ let expected_failures = $expected_failures;
let events = $node.node.get_and_clear_pending_events();
match events[0] {
$crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
- let count = conditions.expected_destinations.len() + 1;
+ let count = expected_failures.len() + 1;
assert_eq!(events.len(), count);
- if conditions.expected_destinations.len() > 0 {
- expect_htlc_handling_failed_destinations!(events, conditions.expected_destinations)
+ if expected_failures.len() > 0 {
+ expect_htlc_handling_failed_destinations!(events, expected_failures)
}
}}
}
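With the `HTLCHandlingFailedConditions` builder removed, call sites pass the expected destinations directly as a `Vec`, as in the test updates throughout this diff. For example (values illustrative):

// Expect one PendingHTLCsForwardable event plus one HTLCHandlingFailed event when
// we fail a received payment back ourselves:
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
	vec![HTLCDestination::FailedPayment { payment_hash }]);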
#[macro_export]
macro_rules! expect_htlc_handling_failed_destinations {
- ($events: expr, $destinations: expr) => {{
+ ($events: expr, $expected_failures: expr) => {{
for event in $events {
match event {
$crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
$crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => {
- assert!($destinations.contains(&failed_next_destination))
+ assert!($expected_failures.contains(&failed_next_destination))
},
_ => panic!("Unexpected destination"),
}
/// Clears (and ignores) a PendingHTLCsForwardable event
macro_rules! expect_pending_htlcs_forwardable_ignore {
($node: expr) => {{
- expect_pending_htlcs_forwardable_conditions!($node, $crate::ln::functional_test_utils::HTLCHandlingFailedConditions::new());
+ expect_pending_htlcs_forwardable_conditions!($node, vec![]);
}};
}
#[macro_export]
/// Clears (and ignores) PendingHTLCsForwardable and HTLCHandlingFailed events
macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore {
- ($node: expr, $conditions: expr) => {{
- expect_pending_htlcs_forwardable_conditions!($node, $conditions);
+ ($node: expr, $expected_failures: expr) => {{
+ expect_pending_htlcs_forwardable_conditions!($node, $expected_failures);
}};
}
#[macro_export]
/// Handles a PendingHTLCsForwardable and HTLCHandlingFailed event
macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed {
- ($node: expr, $conditions: expr) => {{
- expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $conditions);
+ ($node: expr, $expected_failures: expr) => {{
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $expected_failures);
$node.node.process_pending_htlc_forwards();
// Ensure process_pending_htlc_forwards is idempotent.
assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
}
expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
+ let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect();
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations);
pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash);
}
node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node);
if !update_next_node {
- expect_pending_htlcs_forwardable!(node);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]);
}
}
let events = node.node.get_and_clear_pending_msg_events();
use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
use util::enforcing_trait_impls::EnforcingSigner;
use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use util::errors::APIError;
use util::ser::{Writeable, ReadableArgs};
use util::config::UserConfig;
use prelude::*;
use alloc::collections::BTreeSet;
use core::default::Default;
+use core::iter::repeat;
use sync::{Arc, Mutex};
use ln::functional_test_utils::*;
// We have to forward pending HTLCs twice - once tries to forward the payment forward (and
// fails), the second will process the resulting failure and fail the HTLC backward.
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let mut events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
- match events[1] {
+ match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
check_spends!(commitment_tx[0], chan_2.3);
nodes[2].node.fail_htlc_backwards(&payment_hash);
check_added_monitors!(nodes[2], 0);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
check_added_monitors!(nodes[2], 1);
let events = nodes[2].node.get_and_clear_pending_msg_events();
}
}
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
nodes[2].node.fail_htlc_backwards(&first_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// Drop the last RAA from 3 -> 2
nodes[2].node.fail_htlc_backwards(&second_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
check_added_monitors!(nodes[2], 1);
nodes[2].node.fail_htlc_backwards(&third_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// commitment transaction for nodes[0] until process_pending_htlc_forwards().
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), 2);
match events[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
+ match events[1] {
+ Event::HTLCHandlingFailed { .. } => { },
+ _ => panic!("Unexpected event"),
+ }
// Deliberately don't process the pending fail-back so they all fail back at once after
// block connection just like the !deliver_bs_raa case
}
assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
+ assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() });
match events[0] {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
_ => panic!("Unexepected event"),
connect_block(&nodes[1], &block);
}
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
connect_blocks(&nodes[1], 1);
if forwarded_htlc {
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(fail_commit.len(), 1);
mine_transaction(&nodes[1], &htlc_timeout_tx);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
&[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
- let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
+ let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+ let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
+ let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
+ let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
// Rebalance and check output sanity...
send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
- let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+ let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
// 0th HTLC:
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
// Double-check that six of the new HTLCs were added
// We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
// with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
// Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
// Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
nodes[4].node.fail_htlc_backwards(&payment_hash_5);
nodes[4].node.fail_htlc_backwards(&payment_hash_6);
check_added_monitors!(nodes[4], 0);
- expect_pending_htlcs_forwardable!(nodes[4]);
+
+ let failed_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
check_added_monitors!(nodes[4], 1);
let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
nodes[5].node.fail_htlc_backwards(&payment_hash_2);
nodes[5].node.fail_htlc_backwards(&payment_hash_4);
check_added_monitors!(nodes[5], 0);
- expect_pending_htlcs_forwardable!(nodes[5]);
+
+ let failed_destinations_2 = vec![
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
check_added_monitors!(nodes[5], 1);
let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
- let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
-
- expect_pending_htlcs_forwardable!(nodes[3]);
+ let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
+
+ // After the 4 and 2 removes above in nodes[4] and nodes[5] respectively, nodes[3] should receive 6 HTLCHandlingFailed events
+ let failed_destinations_3 = vec![
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
check_added_monitors!(nodes[3], 1);
let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
//
// Alternatively, we may broadcast the previous commitment transaction, which should only
// result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
- let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
+ let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
if announce_latest {
mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
}
let events = nodes[2].node.get_and_clear_pending_events();
let close_event = if deliver_last_raa {
- assert_eq!(events.len(), 2);
- events[1].clone()
+ assert_eq!(events.len(), 2 + 6);
+ events.last().clone().unwrap()
} else {
assert_eq!(events.len(), 1);
- events[0].clone()
+ events.last().clone().unwrap()
};
match close_event {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
check_closed_broadcast!(nodes[2], true);
if deliver_last_raa {
expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+
+ let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
+ expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
} else {
- expect_pending_htlcs_forwardable!(nodes[2]);
+ let expected_destinations: Vec<HTLCDestination> = if announce_latest {
+ repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
+ } else {
+ repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
+ };
+
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
}
check_added_monitors!(nodes[2], 3);
let htlc_value = if use_dust { 50000 } else { 3000000 };
let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
nodes[1].node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(process_htlc_forwards_event.len(), 1);
+ assert_eq!(process_htlc_forwards_event.len(), 2);
match &process_htlc_forwards_event[0] {
&Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_4.len(), 1);
// Fail one HTLC to prune it in the will-be-latest-local commitment tx
nodes[1].node.fail_htlc_backwards(&payment_hash_2);
check_added_monitors!(nodes[1], 0);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
check_added_monitors!(nodes[1], 1);
let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// Note that we first have to wait a random delay before processing the receipt of the HTLC,
// and then will wait a second random delay before failing the HTLC back:
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
// Node 3 is expecting a payment of 100_000 but received 10_000,
// so it should fail the HTLC as if we didn't know the preimage.
connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
let events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
- match events[1] {
+ match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
// Lock HTLC in both directions
- let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
- route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
+ let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+ let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
assert_eq!(revoked_local_txn[0].input.len(), 1);
assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
// Revoke local commitment tx
- claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
// Broadcast set of revoked txn on A
connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
- expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
// All the cases below should end up being handled exactly identically, so we wrap
// the resulting event checks in a macro.
macro_rules! handle_unknown_invalid_payment_data {
- () => {
+ ($payment_hash: expr) => {
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
let payment_event = SendEvent::from_event(events.pop().unwrap());
// We have to forward pending HTLCs once to process the receipt of the HTLC and then
// again to process the pending backwards-failure of the HTLC
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
check_added_monitors!(nodes[1], 1);
// We should fail the payment back
// Send a payment with the right payment hash but the wrong payment secret
nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(our_payment_hash);
expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
// Send a payment with a random payment hash, but the right payment secret
nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(random_payment_hash);
expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
// Send a payment with a random payment hash and random payment secret
nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(random_payment_hash);
expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
}
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// Now we fail the first HTLC back from the user end.
nodes[1].node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ let expected_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+ HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
} else {
// Let the second HTLC fail and claim the first
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
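The `_ignore` variants follow the same convention as the old macros: assert the pending events (here including the expected `HTLCHandlingFailed` destinations) without driving the forwarding pass, which the test then triggers explicitly via `process_pending_htlc_forwards()`. A minimal sketch of how the non-`_ignore` form presumably composes the two; this is not the literal `functional_test_utils` definition:

```rust
macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed {
	($node: expr, $expected_failures: expr) => {{
		// Check for the PendingHTLCsForwardable event plus one
		// HTLCHandlingFailed per expected destination...
		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($node, $expected_failures);
		// ...then actually run the forwarding pass.
		$node.node.process_pending_htlc_forwards();
	}}
}
```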
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
.with_features(InvoiceFeatures::known());
}
expect_pending_htlcs_forwardable_ignore!(nodes[3]);
nodes[3].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[3].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[3], 1);
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
check_added_monitors!(nodes[2], 1);
let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
connect_blocks(&nodes[3], TEST_FINAL_CLTV);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
- expect_pending_htlcs_forwardable!(nodes[3]);
+ let failed_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash },
+ HTLCDestination::FailedPayment { payment_hash },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
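Note the two identical `FailedPayment` entries above: with an MPP payment, each failed HTLC part produces its own `HTLCHandlingFailed` event, even though both parts carry the same payment hash. Assuming `HTLCDestination` derives `Clone`, the repeat form of `vec!` expresses the same expectation more compactly:

```rust
// Equivalent to listing the destination twice by hand; one entry per failed HTLC part.
let failed_destinations = vec![HTLCDestination::FailedPayment { payment_hash }; 2];
```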
use ln::channelmanager::BREAKDOWN_TIMEOUT;
use ln::features::InitFeatures;
use ln::msgs::ChannelMessageHandler;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
check_added_monitors!(nodes[1], 1);
let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, ChannelUpdate, OptionalField};
use ln::wire::Encode;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use util::ser::{ReadableArgs, Writeable, Writer};
use util::{byte_utils, test_utils};
use util::config::{UserConfig, ChannelConfig};
expect_htlc_forward!(&nodes[2]);
expect_event!(&nodes[2], Event::PaymentReceived);
callback_node();
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
}
let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
};
expect_pending_htlcs_forwardable_ignore!(nodes[1]);
nodes[1].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
check_added_monitors!(&nodes[1], 1);
}
expect_pending_htlcs_forwardable_ignore!(nodes[1]);
nodes[1].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
check_added_monitors!(&nodes[1], 1);
}
expect_pending_htlcs_forwardable_ignore!(nodes[1]);
nodes[1].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
check_added_monitors!(&nodes[1], 1);
expect_pending_htlcs_forwardable_ignore!(nodes[1]);
nodes[1].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
check_added_monitors!(&nodes[1], 1);
nodes[1].node.process_pending_htlc_forwards();
expect_pending_htlcs_forwardable_ignore!(nodes[1]);
nodes[1].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
nodes[1].node.process_pending_htlc_forwards();
let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
check_added_monitors!(&nodes[1], 1);
nodes[1].node.process_pending_htlc_forwards();
expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat);
nodes[1].node.fail_htlc_backwards(&payment_hash);
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
use ln::msgs;
use ln::msgs::ChannelMessageHandler;
use routing::router::{PaymentParameters, get_route};
-use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use util::test_utils;
use util::errors::APIError;
use util::enforcing_trait_impls::EnforcingSigner;
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
// Rebalance to find a route
send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(&nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
- let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_4_id = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+ let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
+ let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known());
// Rebalance
send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
let path = route.paths[0].clone();
route.paths.push(path);
route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
- route.paths[0][0].short_channel_id = chan_1_id;
- route.paths[0][1].short_channel_id = chan_3_id;
+ route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
+ route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
- route.paths[1][0].short_channel_id = chan_2_id;
- route.paths[1][1].short_channel_id = chan_4_id;
+ route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
+ route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
// Initiate the MPP payment.
let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
// Attempt to forward the payment and complete the 2nd path's failure.
expect_pending_htlcs_forwardable!(&nodes[2]);
- expect_pending_htlcs_forwardable!(&nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]);
let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
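These hunks stop binding only `.0.contents.short_channel_id` and instead keep the full tuple returned by `create_announced_chan_between_nodes`, since the new `NextHopChannel` assertion also needs the channel id (the `.2` element, e.g. `chan_4_id` above). For reference, the tuple shape as these tests use it:

```rust
// .0/.1: the two ChannelUpdates (one per direction), .2: the channel id,
// .3: the funding transaction (cf. `chan.2` and `chan.3.txid()` elsewhere here).
let (update_a, _update_b, channel_id, _funding_tx) =
	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
let short_channel_id = update_a.contents.short_channel_id;
```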
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
- let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+ let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
+ let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
let path = route.paths[0].clone();
route.paths.push(path);
route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
- route.paths[0][0].short_channel_id = chan_1_id;
- route.paths[0][1].short_channel_id = chan_3_id;
+ route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
+ route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
- route.paths[1][0].short_channel_id = chan_2_id;
- route.paths[1][1].short_channel_id = chan_4_id;
+ route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
+ route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
// Initiate the MPP payment.
let _ = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
}
// Failed HTLC from node 3 -> 1
- expect_pending_htlcs_forwardable!(nodes[3]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);
// Failed HTLC from node 1 -> 0
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]);
let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
// Rebalance to find a route
send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(&nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
nodes[1].node.fail_htlc_backwards(&payment_hash);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate, ErrorAction};
use ln::wire::Encode;
use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use util::config::UserConfig;
use util::ser::{Writeable, ReadableArgs};
use util::test_utils;
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0, InitFeatures::known(), InitFeatures::known());
- create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known());
+ let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known());
let last_hop = nodes[2].node.list_usable_channels();
let mut hop_hints = vec![RouteHint(vec![RouteHintHop {
commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]);
check_added_monitors!(nodes[1], 1);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
use ln::features::InitFeatures;
use ln::msgs::ChannelMessageHandler;
use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use util::test_utils;
use util::ser::{ReadableArgs, Writeable};
txdata: vec![],
};
connect_block(&nodes[1], &block);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
}
check_added_monitors!(nodes[1], 1);
update_res
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
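Finally, every `Watch` implementation (including the test wrappers above) is updated to the widened `release_pending_monitor_events` return type. A hedged sketch of a consumer, with `handle_monitor_event` standing in for whatever the `ChannelManager` actually does with each event:

```rust
// Sketch only; `handle_monitor_event` is hypothetical. The Option is presumably
// None for ChannelMonitors serialized before the counterparty node id was tracked,
// so a consumer must be able to fall back to looking the channel up by its
// funding outpoint alone.
for (funding_outpoint, monitor_events, counterparty_node_id) in
	chain_monitor.release_pending_monitor_events()
{
	for event in monitor_events {
		handle_monitor_event(funding_outpoint, &event, counterparty_node_id);
	}
}
```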