for htlc_source in failed_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
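For reference, the numeric failure codes used throughout this patch are BOLT 4 onion error codes. A hedged sketch spelling them out (the constant names here are illustrative, not LDK API):

    // BOLT 4 failure-code flag bits and the codes appearing in this patch.
    const PERM: u16 = 0x4000;
    const UPDATE: u16 = 0x1000;
    const PERMANENT_CHANNEL_FAILURE: u16 = PERM | 8;              // 0x4000 | 8
    const INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS: u16 = PERM | 15;  // 0x4000 | 15
    const TEMPORARY_CHANNEL_FAILURE: u16 = UPDATE | 7;            // 0x1000 | 7
    const MPP_TIMEOUT: u16 = 23;                                  // plain code 23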
let _ = handle_error!(self, result, *counterparty_node_id);
for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
if let Some((funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
let mut handle_errors = Vec::new();
{
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
-
let mut forward_htlcs = HashMap::new();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
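The `mem::swap` above takes the whole pending-forwards map out of its mutex, so the iteration below runs without holding the `forward_htlcs` lock. A minimal sketch of the idiom (types and names hypothetical):

    use std::collections::HashMap;
    use std::mem;
    use std::sync::Mutex;

    fn drain_pending(pending: &Mutex<HashMap<u64, Vec<u8>>>) {
        // Swap an empty map in; the old contents come out in `taken`.
        let mut taken = HashMap::new();
        mem::swap(&mut taken, &mut pending.lock().unwrap());
        // The temporary guard is dropped at the end of the swap statement.
        for (scid, htlcs) in taken {
            let _ = (scid, htlcs); // process each entry without the lock held
        }
    }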
for (short_chan_id, mut pending_forwards) in forward_htlcs {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
if short_chan_id != 0 {
let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) {
Some((_cp_id, chan_id)) => chan_id.clone(),
}
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
+ self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination);
}
self.forward_htlcs(&mut phantom_receives);
for htlc_source in timed_out_mpp_htlcs.drain(..) {
let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver );
+ self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
}
for (err, counterparty_node_id) in handle_errors.drain(..) {
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
+ let removed_source = {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.claimable_htlcs.remove(payment_hash)
+ };
if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
self.best_block.read().unwrap().height()));
- self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
+ self.fail_htlc_backwards_internal(
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
HTLCDestination::FailedPayment { payment_hash: *payment_hash });
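Since `fail_htlc_backwards_internal` now asserts that `channel_state` is unlocked, callers scope the guard in a block expression so it drops before the call, as in the `removed_source` block above. A minimal sketch of that idiom (names hypothetical):

    use std::collections::HashMap;
    use std::sync::Mutex;

    fn remove_then_fail(claimable: &Mutex<HashMap<u32, Vec<u8>>>, key: u32) {
        let removed = {
            let mut map = claimable.lock().unwrap();
            map.remove(&key)
        }; // guard dropped here, before any call that must not see the lock held
        if let Some(_htlcs) = removed {
            // safe to call a function that asserts the lock is free
        }
    }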
counterparty_node_id: &PublicKey
) {
for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
- let mut channel_state = self.channel_state.lock().unwrap();
let (failure_code, onion_failure_data) =
- match channel_state.by_id.entry(channel_id) {
+ match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
hash_map::Entry::Occupied(chan_entry) => {
self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
},
};
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
- self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
+ self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
}
}
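Note that the guard created in the match scrutinee above lives only until the end of the enclosing `let ... = match ...;` statement, so the lock is released before `fail_htlc_backwards_internal` runs. A small sketch of that temporary-lifetime rule (hypothetical names):

    use std::sync::Mutex;

    fn first_byte(lock: &Mutex<Vec<u8>>) -> u8 {
        let b = match lock.lock().unwrap().first() {
            Some(&b) => b,
            None => 0,
        }; // the temporary MutexGuard drops here
        // the lock is free again; re-taking it now would not deadlock
        b
    }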
/// Fails an HTLC backwards to the node that sent it to us.
- /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
- /// There are several callsites that do stupid things like loop over a list of payment_hashes
- /// to fail and take the channel_state lock for each iteration (as we take ownership and may
- /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
- /// still-available channels.
- fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+ /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
+ fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+ #[cfg(debug_assertions)]
+ {
+ // Ensure that the `channel_state` lock is not held when calling this function.
+ // This ensures that future code doesn't introduce a lock_order requirement for
+ // `forward_htlcs` to be locked after the `channel_state` lock, which calling this
+ // function with the `channel_state` locked would introduce.
+ assert!(self.channel_state.try_lock().is_ok());
+ }
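This check works because `std::sync::Mutex` is not re-entrant: if the calling thread already held `channel_state`, `try_lock()` would fail. (It can also fail when another thread holds the lock, so this is a best-effort debug check, not a guarantee.) A self-contained sketch of the pattern, with hypothetical names:

    use std::sync::Mutex;

    struct Mgr { state: Mutex<()> } // stand-in for ChannelManager::channel_state

    impl Mgr {
        fn must_not_hold_state(&self) {
            #[cfg(debug_assertions)]
            {
                // Succeeds only if `state` is currently unlocked; the returned
                // guard is a temporary, released at the end of the statement.
                assert!(self.state.try_lock().is_ok());
            }
        }
    }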
+
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
//identify whether we sent it or not based on the (I presume) very different runtime
//between the branches here. We should make this async and move it into the forward HTLCs
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
return;
}
- mem::drop(channel_state_lock);
let mut retry = if let Some(payment_params_data) = payment_params {
let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
Some(RouteParameters {
}
}
mem::drop(forward_htlcs);
- mem::drop(channel_state_lock);
let mut pending_events = self.pending_events.lock().unwrap();
if let Some(time) = forward_event {
pending_events.push(events::Event::PendingHTLCsForwardable {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
+ let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
if let Some((payment_purpose, mut sources)) = removed_source {
assert!(!sources.is_empty());
let mut claimable_amt_msat = 0;
let mut expected_amt_msat = None;
let mut valid_mpp = true;
+ let mut errs = Vec::new();
+ let mut claimed_any_htlcs = false;
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
for htlc in sources.iter() {
- if let None = channel_state.as_ref().unwrap().short_to_chan_info.get(&htlc.prev_hop.short_channel_id) {
+ if channel_state.short_to_chan_info.get(&htlc.prev_hop.short_channel_id).is_none() {
valid_mpp = false;
break;
}
expected_amt_msat.unwrap(), claimable_amt_msat);
return;
}
-
- let mut errs = Vec::new();
- let mut claimed_any_htlcs = false;
- for htlc in sources.drain(..) {
- if !valid_mpp {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
- self.best_block.read().unwrap().height()));
- self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
- HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
- HTLCDestination::FailedPayment { payment_hash } );
- } else {
- match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
+ if valid_mpp {
+ for htlc in sources.drain(..) {
+ match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
}
}
}
+ mem::drop(channel_state_lock);
+ if !valid_mpp {
+ for htlc in sources.drain(..) {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
+ self.best_block.read().unwrap().height()));
+ self.fail_htlc_backwards_internal(
+ HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash } );
+ }
+ }
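The restructuring above claims all valid MPP parts while `channel_state_lock` is held, then releases the lock explicitly with `mem::drop` before failing any parts backwards, honoring the new no-lock precondition. A minimal sketch of the early-release idiom (names hypothetical):

    use std::sync::Mutex;

    fn claim_then_fail(lock: &Mutex<u32>) {
        let mut guard = lock.lock().unwrap();
        *guard += 1;           // work that needs the lock (claiming)
        std::mem::drop(guard); // explicit early release
        // lock-free section: failing HTLCs backwards would happen here
    }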
if claimed_any_htlcs {
self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
});
}
- // Now that we've done the entire above loop in one lock, we can handle any errors
- // which were generated.
- channel_state.take();
-
+ // Now we can handle any errors which were generated.
for (counterparty_node_id, err) in errs.drain(..) {
let res: Result<(), _> = Err(err);
let _ = handle_error!(self, res, counterparty_node_id);
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
+ self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
}
};
for htlc_source in dropped_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
- let mut channel_state = self.channel_state.lock().unwrap();
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
if forward_htlcs.is_empty() {
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
{
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
+ self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
self.finalize_claims(finalized_claim_htlcs);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
},
MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
self.handle_init_event_channel_failures(failed_channels);
for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
+ self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
}
}
for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
- channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a