/// SCIDs being added once the funding transaction is confirmed at the channel's required
/// confirmation depth.
pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
- /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
- ///
- /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
- /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
- /// and via the classic SCID.
- ///
- /// Note that while this is held in the same mutex as the channels themselves, no consistency
- /// guarantees are made about the existence of a channel with the short id here, nor the short
- /// ids in the PendingHTLCInfo!
- pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
/// Map from payment hash to the payment data and any HTLCs which are to us and can be
/// failed/claimed by the user.
///
/// essentially you should default to using a SimpleRefChannelManager, and use a
/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
/// you're using lightning-net-tokio.
+//
+// Lock order:
+// The tree structure below illustrates the lock order requirements for the different locks of the
+// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
+// and should then be taken in top-down tree order, i.e. a lock closer to the root is acquired
+// before any lock below it on the same branch.
+// Note that locks on different branches shall not be taken at the same time, as doing so will
+// create a new lock order for those specific locks in the order they were taken.
+//
+// Lock order tree:
+//
+// `total_consistency_lock`
+// |
+// |__`forward_htlcs`
+// |
+// |__`channel_state`
+// | |
+// | |__`id_to_peer`
+// | |
+// | |__`per_peer_state`
+// | |
+// | |__`outbound_scid_aliases`
+// | |
+// | |__`pending_inbound_payments`
+// | |
+// | |__`pending_outbound_payments`
+// | |
+// | |__`best_block`
+// | |
+// | |__`pending_events`
+// | |
+// | |__`pending_background_events`
+//
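+// As an illustrative sketch (not code from this module): `channel_state` and `pending_events`
+// are on the same branch, so both may be held if the higher-level lock is taken first, while
+// `forward_htlcs` and `channel_state` are on different branches and must never be held at the
+// same time:
+//
+//     // OK: same branch, higher level taken first.
+//     let mut channel_state = self.channel_state.lock().unwrap();
+//     let mut events = self.pending_events.lock().unwrap();
+//
+//     // NOT OK: different branches; holding both defines a new (forbidden) lock order.
+//     let forwards = self.forward_htlcs.lock().unwrap();
+//     let channel_state = self.channel_state.lock().unwrap();
+//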
pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
chain_monitor: M,
tx_broadcaster: T,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(test)]
pub(super) best_block: RwLock<BestBlock>,
#[cfg(not(test))]
best_block: RwLock<BestBlock>,
secp_ctx: Secp256k1<secp256k1::All>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(any(test, feature = "_test_utils"))]
pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
#[cfg(not(any(test, feature = "_test_utils")))]
/// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
/// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
- /// Locked *after* channel_state.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
///
/// See `PendingOutboundPayment` documentation for more info.
///
- /// Locked *after* channel_state.
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
+ /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
+ ///
+ /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+ /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+ /// and via the classic SCID.
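+ ///
+ /// For example (illustrative values only): HTLCs to forward over a channel with SCID `42`
+ /// and SCID alias `1042` may be queued under key `42`, key `1042`, or both, while HTLCs to
+ /// be received locally are always queued under key `0`.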
+ ///
+ /// Note that no consistency guarantees are made about the existence of a channel with the
+ /// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ #[cfg(test)]
+ pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+ #[cfg(not(test))]
+ forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+
/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
/// and some closed channels which reached a usable state prior to being closed. This is used
/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
/// active channel list on load.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
outbound_scid_aliases: Mutex<HashSet<u64>>,
/// `channel_id` -> `counterparty_node_id`.
/// We should add `counterparty_node_id`s to `MonitorEvent`s, and eventually rely on it in the
/// future. That would make this map redundant, as only the `ChannelManager::per_peer_state` is
/// required to access the channel with the `counterparty_node_id`.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
our_network_key: SecretKey,
/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
/// new channel.
///
- /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_events: Mutex<Vec<events::Event>>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
/// in over-/re-payment.
///
/// The results here are ordered the same as the paths in the route object which was passed to
- /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
- /// retried (though there is currently no API with which to do so).
+ /// send_payment, and any `Err`s which are not [`APIError::MonitorUpdateInProgress`] can be
+ /// safely retried via [`ChannelManager::retry_payment`].
///
- /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
- /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
- /// case of Ok(())) or will send once a [`MonitorEvent::Completed`] is provided for the
- /// next-hop channel with the latest update_id.
+ /// Any entries which contain `Err(APIError::MonitorUpdateInProgress)` or `Ok(())` MUST NOT be
+ /// retried as they will result in over-/re-payment. These HTLCs have all either been
+ /// successfully sent (in the case of `Ok(())`) or will be sent once a
+ /// [`MonitorEvent::Completed`] is provided for the next-hop channel with the latest update_id.
PartialFailure {
/// The errors themselves, in the same order as the route hops.
results: Vec<Result<(), APIError>>,
}
}
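// A minimal sketch (not code from this module) of how a caller might honor the retry rules
// documented on `PartialFailure`, assuming the `send_payment`/`retry_payment` API referenced
// above (call shapes are illustrative):
//
//     match channel_manager.send_payment(&route, payment_hash, &payment_secret) {
//         Err(PaymentSendFailure::PartialFailure { results, .. }) => {
//             for (res, _path) in results.iter().zip(route.paths.iter()) {
//                 match res {
//                     // Sent, or will send on `MonitorEvent::Completed`: MUST NOT retry.
//                     Ok(()) | Err(APIError::MonitorUpdateInProgress) => {},
//                     // Any other error is safe to retry via `ChannelManager::retry_payment`.
//                     Err(_) => { /* rebuild a route for this path and retry */ },
//                 }
//             }
//         },
//         _ => {},
//     }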
-macro_rules! handle_monitor_err {
+macro_rules! handle_monitor_update_res {
($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
match $err {
ChannelMonitorUpdateStatus::PermanentFailure => {
}
};
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
- let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+ let (res, drop) = handle_monitor_update_res!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
if drop {
$entry.remove_entry();
}
} };
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+ handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
} };
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+ handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
};
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
+ handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
};
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
+ handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
};
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
+ handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
};
}
if $raa.is_none() {
order = RAACommitmentOrder::CommitmentFirst;
}
- break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
+ break handle_monitor_update_res!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
}
}
}
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
short_to_chan_info: HashMap::new(),
- forward_htlcs: HashMap::new(),
claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
}),
outbound_scid_aliases: Mutex::new(HashSet::new()),
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: Mutex::new(HashMap::new()),
+ forward_htlcs: Mutex::new(HashMap::new()),
id_to_peer: Mutex::new(HashMap::new()),
our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
if let Some(monitor_update) = monitor_update {
let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
let (result, is_permanent) =
- handle_monitor_err!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
if is_permanent {
remove_channel!(self, channel_state, chan_entry);
break result;
for htlc_source in failed_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
if let Some((funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
let chan_id = chan.get().channel_id();
match (update_err,
- handle_monitor_err!(self, update_err, channel_state, chan,
+ handle_monitor_update_res!(self, update_err, channel_state, chan,
RAACommitmentOrder::CommitmentFirst, false, true))
{
(ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
insert_outbound_payment!();
},
(ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
- // Note that MonitorUpdateFailed here indicates (per function docs)
- // that we will resend the commitment update once monitor updating
- // is restored. Therefore, we must return an error indicating that
- // it is unsafe to retry the payment wholesale, which we do in the
- // send_payment check for MonitorUpdateFailed, below.
+ // Note that MonitorUpdateInProgress here indicates (per function
+ // docs) that we will resend the commitment update once monitor
+ // updating completes. Therefore, we must return an error
+ // indicating that it is unsafe to retry the payment wholesale,
+ // which we do in the send_payment check for
+ // MonitorUpdateInProgress, below.
insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
- return Err(APIError::MonitorUpdateFailed);
+ return Err(APIError::MonitorUpdateInProgress);
},
_ => unreachable!(),
}
/// PaymentSendFailure for more info.
///
/// In general, a path may raise:
- /// * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
+ /// * [`APIError::RouteError`] when an invalid route or forwarding parameter (cltv_delta, fee,
/// node public key) is specified.
- /// * APIError::ChannelUnavailable if the next-hop channel is not available for updates
+ /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
/// (including due to previous monitor update failure or new permanent monitor update
/// failure).
- /// * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+ /// * [`APIError::MonitorUpdateInProgress`] if an in-progress monitor update prevented sending the
/// relevant updates.
///
/// Note that depending on the type of the PaymentSendFailure the HTLC may have been
for (res, path) in results.iter().zip(route.paths.iter()) {
if res.is_ok() { has_ok = true; }
if res.is_err() { has_err = true; }
- if let &Err(APIError::MonitorUpdateFailed) = res {
- // MonitorUpdateFailed is inherently unsafe to retry, so we call it a
+ if let &Err(APIError::MonitorUpdateInProgress) = res {
+ // MonitorUpdateInProgress is inherently unsafe to retry, so we call it a
// PartialFailure.
has_err = true;
has_ok = true;
// Transactions are evaluated as final by network mempools at the next block. However, the modules
// constituting our Lightning node might not have perfect sync about their blockchain views. Thus, if
// the wallet module is in advance on the LDK view, allow one more block of headroom.
- // TODO: updated if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 landed and rust-bitcoin bumped.
if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
return Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
let mut handle_errors = Vec::new();
{
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
+ let mut forward_htlcs = HashMap::new();
+ mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
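+ // Swapping with a fresh map takes ownership of all pending forwards while holding the
+ // `forward_htlcs` lock only for the swap itself, so `channel_state` can be locked in the
+ // loop below without both locks ever being held at once.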
- for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
+ for (short_chan_id, mut pending_forwards) in forward_htlcs {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
if short_chan_id != 0 {
let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) {
Some((_cp_id, chan_id)) => chan_id.clone(),
match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
ChannelMonitorUpdateStatus::Completed => {},
e => {
- handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+ handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
continue;
}
}
}
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
+ self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination);
}
self.forward_htlcs(&mut phantom_receives);
Ok(())
},
e => {
- let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+ let (res, drop) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
if drop { retain_channel = false; }
res
}
for htlc_source in timed_out_mpp_htlcs.drain(..) {
let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver );
+ self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
}
for (err, counterparty_node_id) in handle_errors.drain(..) {
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
+ let removed_source = {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.claimable_htlcs.remove(payment_hash)
+ };
if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
self.best_block.read().unwrap().height()));
- self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
+ self.fail_htlc_backwards_internal(
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
HTLCDestination::FailedPayment { payment_hash: *payment_hash });
counterparty_node_id: &PublicKey
) {
for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
- let mut channel_state = self.channel_state.lock().unwrap();
let (failure_code, onion_failure_data) =
- match channel_state.by_id.entry(channel_id) {
+ match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
hash_map::Entry::Occupied(chan_entry) => {
self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
},
};
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
- self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
+ self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
}
}
/// Fails an HTLC backwards to the sender of it to us.
- /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
- /// There are several callsites that do stupid things like loop over a list of payment_hashes
- /// to fail and take the channel_state lock for each iteration (as we take ownership and may
- /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
- /// still-available channels.
- fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+ /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
+ fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+ #[cfg(debug_assertions)]
+ {
+ // Ensure that the `channel_state` lock is not held when calling this function.
+ // This ensures that future code doesn't introduce a lock order requiring
+ // `forward_htlcs` to be locked after `channel_state`, which calling this function
+ // while holding the `channel_state` lock would do.
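+ // Note that `Mutex::try_lock` also fails if another thread currently holds the lock,
+ // so this is a best-effort debug check rather than a precise same-thread assertion.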
+ assert!(self.channel_state.try_lock().is_ok());
+ }
+
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
//identify whether we sent it or not based on the (I presume) very different runtime
//between the branches here. We should make this async and move it into the forward HTLCs
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
return;
}
- mem::drop(channel_state_lock);
let mut retry = if let Some(payment_params_data) = payment_params {
let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
Some(RouteParameters {
};
let mut forward_event = None;
- if channel_state_lock.forward_htlcs.is_empty() {
+ let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
+ if forward_htlcs.is_empty() {
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
}
- match channel_state_lock.forward_htlcs.entry(short_channel_id) {
+ match forward_htlcs.entry(short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
},
entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
}
}
- mem::drop(channel_state_lock);
+ mem::drop(forward_htlcs);
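+ // `forward_htlcs` and `pending_events` sit on different branches of the lock order tree,
+ // so `forward_htlcs` is dropped before `pending_events` is taken below.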
let mut pending_events = self.pending_events.lock().unwrap();
if let Some(time) = forward_event {
pending_events.push(events::Event::PendingHTLCsForwardable {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
+ let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
if let Some((payment_purpose, mut sources)) = removed_source {
assert!(!sources.is_empty());
let mut claimable_amt_msat = 0;
let mut expected_amt_msat = None;
let mut valid_mpp = true;
+ let mut errs = Vec::new();
+ let mut claimed_any_htlcs = false;
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
for htlc in sources.iter() {
- if let None = channel_state.as_ref().unwrap().short_to_chan_info.get(&htlc.prev_hop.short_channel_id) {
+ if let None = channel_state.short_to_chan_info.get(&htlc.prev_hop.short_channel_id) {
valid_mpp = false;
break;
}
expected_amt_msat.unwrap(), claimable_amt_msat);
return;
}
-
- let mut errs = Vec::new();
- let mut claimed_any_htlcs = false;
- for htlc in sources.drain(..) {
- if !valid_mpp {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
- self.best_block.read().unwrap().height()));
- self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
- HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
- HTLCDestination::FailedPayment { payment_hash } );
- } else {
- match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
+ if valid_mpp {
+ for htlc in sources.drain(..) {
+ match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
}
}
}
+ mem::drop(channel_state_lock);
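+ // `fail_htlc_backwards_internal` debug-asserts that the `channel_state` lock is not held,
+ // so it must be dropped before the failure loop below.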
+ if !valid_mpp {
+ for htlc in sources.drain(..) {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
+ self.best_block.read().unwrap().height()));
+ self.fail_htlc_backwards_internal(
+ HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash });
+ }
+ }
if claimed_any_htlcs {
self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
});
}
- // Now that we've done the entire above loop in one lock, we can handle any errors
- // which were generated.
- channel_state.take();
-
+ // Now we can handle any errors which were generated.
for (counterparty_node_id, err) in errs.drain(..) {
let res: Result<(), _> = Err(err);
let _ = handle_error!(self, res, counterparty_node_id);
payment_preimage, e);
return ClaimFundsFromHop::MonitorUpdateFail(
chan.get().get_counterparty_node_id(),
- handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+ handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
Some(htlc_value_msat)
);
}
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
+ self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
}
match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
ChannelMonitorUpdateStatus::Completed => {},
e => {
- let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+ let mut res = handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
// We weren't able to watch the channel to begin with, so no updates should be made on
// it. Previously, full_stack_target found an (unreachable) panic when the
if let Some(monitor_update) = monitor_update {
let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
let (result, is_permanent) =
- handle_monitor_err!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
if is_permanent {
remove_channel!(self, channel_state, chan_entry);
break result;
};
for htlc_source in dropped_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
Ok(res) => res
};
let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
- if let Err(e) = handle_monitor_err!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+ if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
return Err(e);
}
for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
- let mut channel_state = self.channel_state.lock().unwrap();
- if channel_state.forward_htlcs.is_empty() {
+ let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
+ if forward_htlcs.is_empty() {
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
}
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
- match channel_state.forward_htlcs.entry(match forward_info.routing {
+ match forward_htlcs.entry(match forward_info.routing {
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
PendingHTLCRouting::Receive { .. } => 0,
PendingHTLCRouting::ReceiveKeysend { .. } => 0,
break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
}
if update_res != ChannelMonitorUpdateStatus::Completed {
- if let Err(e) = handle_monitor_err!(self, update_res, channel_state, chan,
+ if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan,
RAACommitmentOrder::CommitmentFirst, false,
raa_updates.commitment_update.is_some(), false,
raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
{
for failure in pending_failures.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
+ self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
self.finalize_claims(finalized_claim_htlcs);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
},
MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
},
e => {
has_monitor_update = true;
- let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+ let (res, close_channel) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
handle_errors.push((chan.get_counterparty_node_id(), res));
if close_channel { return false; }
},
self.handle_init_event_channel_failures(failed_channels);
for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
+ self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
}
}
best_block.block_hash().write(writer)?;
}
- let channel_state = self.channel_state.lock().unwrap();
- let mut unfunded_channels = 0;
- for (_, channel) in channel_state.by_id.iter() {
- if !channel.is_funding_initiated() {
- unfunded_channels += 1;
+ {
+ // Take the `channel_state` lock only within this scope, to avoid creating a lock order
+ // that requires the `forward_htlcs` lock to be taken after `channel_state`.
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut unfunded_channels = 0;
+ for (_, channel) in channel_state.by_id.iter() {
+ if !channel.is_funding_initiated() {
+ unfunded_channels += 1;
+ }
}
- }
- ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
- for (_, channel) in channel_state.by_id.iter() {
- if channel.is_funding_initiated() {
- channel.write(writer)?;
+ ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
+ for (_, channel) in channel_state.by_id.iter() {
+ if channel.is_funding_initiated() {
+ channel.write(writer)?;
+ }
}
}
- (channel_state.forward_htlcs.len() as u64).write(writer)?;
- for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
- short_channel_id.write(writer)?;
- (pending_forwards.len() as u64).write(writer)?;
- for forward in pending_forwards {
- forward.write(writer)?;
+ {
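+ // Scope the `forward_htlcs` lock so it is released before `channel_state` is taken below,
+ // as the two locks must not be held at the same time.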
+ let forward_htlcs = self.forward_htlcs.lock().unwrap();
+ (forward_htlcs.len() as u64).write(writer)?;
+ for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
+ short_channel_id.write(writer)?;
+ (pending_forwards.len() as u64).write(writer)?;
+ for forward in pending_forwards {
+ forward.write(writer)?;
+ }
}
}
+ let channel_state = self.channel_state.lock().unwrap();
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
(channel_state.claimable_htlcs.len() as u64).write(writer)?;
for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
channel_state: Mutex::new(ChannelHolder {
by_id,
short_to_chan_info,
- forward_htlcs,
claimable_htlcs,
pending_msg_events: Vec::new(),
}),
pending_inbound_payments: Mutex::new(pending_inbound_payments),
pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+ forward_htlcs: Mutex::new(forward_htlcs),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
id_to_peer: Mutex::new(id_to_peer),
fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
- channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a