}
}
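+ /// Fails an HTLC backwards to the sender of it to us, queuing a `PendingHTLCsForwardable`
+ /// event if the failure requires one.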
+ fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
+ let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
+ if push_forward_event { self.push_pending_forwards_ev(); }
+ }
+
/// Fails an HTLC backwards to the sender of it to us.
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
- fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
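+ /// Returns whether the caller should queue a `PendingHTLCsForwardable` event rather than
+ /// queuing it directly, allowing several failures to be coalesced into a single event.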
+ fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
// Ensure that no peer state channel storage lock is held when calling this function.
// This ensures that future code doesn't introduce a lock-order requirement for
// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
// this function with any `per_peer_state` peer lock acquired would.
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
+ let mut push_forward_event;
match source {
HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
- if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
+ push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
- &self.pending_events, &self.logger)
- { self.push_pending_forwards_ev(); }
+ &self.pending_events, &self.logger);
},
HTLCSource::PreviousHopData(HTLCPreviousHopData {
ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
}
};
- let mut push_forward_ev = self.decode_update_add_htlcs.lock().unwrap().is_empty();
+ push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
- push_forward_ev &= forward_htlcs.is_empty();
+ push_forward_event &= forward_htlcs.is_empty();
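+ // A forward event is only needed if both queues were empty before this failure is
+ // added; if either was non-empty, an event was already queued when its first entry
+ // was pushed.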
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(failure);
}
}
mem::drop(forward_htlcs);
- if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::HTLCHandlingFailed {
prev_channel_id: *channel_id,
}, None));
},
}
+ push_forward_event
}
/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
/// [`MessageSendEvent`]s needed to claim the payment.
#[inline]
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
+ if push_forward_event { self.push_pending_forwards_ev() }
+ }
+
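+ /// As `forward_htlcs`, but returns whether the caller should queue a
+ /// `PendingHTLCsForwardable` event rather than queuing it directly.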
+ #[inline]
+ fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
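+ // Accumulate across all sources (and any failed intercept forwards) so that at most
+ // one `PendingHTLCsForwardable` event is generated for the whole batch.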
+ let mut push_forward_event = false;
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
- let mut push_forward_event = false;
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
} else {
// We don't want to generate a PendingHTLCsForwardable event if only intercepted
// payments are being processed.
- if forward_htlcs_empty && decode_update_add_htlcs_empty {
- push_forward_event = true;
- }
+ push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
- self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
+ push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
}
if !new_intercept_events.is_empty() {
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_intercept_events);
}
- if push_forward_event { self.push_pending_forwards_ev() }
}
+ push_forward_event
}
fn push_pending_forwards_ev(&self) {
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let mut events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
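+ // The `HTLCHandlingFailed` event is generated before the `PendingHTLCsForwardable`
+ // event, so check the events in that order.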
match events[0] {
- Event::PendingHTLCsForwardable { .. } => { },
- _ => panic!("Unexpected event"),
- };
- match events[1] {
Event::HTLCHandlingFailed { .. } => { },
_ => panic!("Unexpected event"),
}
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
// Deliberately don't process the pending fail-backs so they all fail back at once after
// block connection just like the !deliver_bs_raa case
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
if deliver_last_raa {
- expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+ expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
assert_eq!(process_htlc_forwards_event.len(), 2);
- match &process_htlc_forwards_event[0] {
+ match &process_htlc_forwards_event[1] {
&Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
}
let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
- send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
+ let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
assert_eq!(revoked_local_txn[0].input.len(), 1);
let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
connect_block(&nodes[0], &block_129);
let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),