/// essentially you should default to using a SimpleRefChannelManager, and use a
/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
/// you're using lightning-net-tokio.
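+///
+/// For example (a rough sketch; manager construction and generic parameters are elided), sharing
+/// the manager with lightning-net-tokio's `'static` connection tasks means holding it behind an
+/// `Arc`:
+///
+/// ```ignore
+/// // `channel_manager` is assumed to be an already-constructed `ChannelManager`.
+/// let manager = std::sync::Arc::new(channel_manager);
+/// let manager_for_task = std::sync::Arc::clone(&manager);
+/// tokio::spawn(async move {
+///     // The spawned task owns its clone, satisfying tokio's `'static` bound.
+///     let _ = &manager_for_task;
+/// });
+/// ```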
+//
+// Lock order:
+// The tree structure below illustrates the lock order requirements for the different locks of the
+// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
+// and should then be taken in top-down order: a parent lock in the tree must always be taken
+// before any of its children.
+// Note that locks on different branches must not be held at the same time, as doing so would
+// implicitly establish a new lock order between those specific locks, in whatever order they
+// happened to be taken.
+//
+// Lock order tree:
+//
+// `total_consistency_lock`
+// |
+// |__`forward_htlcs`
+// |
+// |__`channel_state`
+// | |
+// | |__`id_to_peer`
+// | |
+// | |__`per_peer_state`
+// | |
+// | |__`outbound_scid_aliases`
+// | |
+// | |__`pending_inbound_payments`
+// | |
+// | |__`pending_outbound_payments`
+// | |
+// | |__`best_block`
+// | |
+// | |__`pending_events`
+// | |
+// | |__`pending_background_events`
+//
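+// For example (a sketch, not code from this file), code that needs both `channel_state` and
+// `per_peer_state` must take them parent-first, since they share a branch above:
+//
+// ```ignore
+// let channel_state = self.channel_state.lock().unwrap();   // parent lock first
+// let per_peer_state = self.per_peer_state.read().unwrap(); // then its child
+// // `forward_htlcs` lies on a different branch than `channel_state`, so it must not be
+// // taken while either of the locks above is held.
+// ```
+//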
pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
chain_monitor: M,
tx_broadcaster: T,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(test)]
pub(super) best_block: RwLock<BestBlock>,
#[cfg(not(test))]
best_block: RwLock<BestBlock>,
secp_ctx: Secp256k1<secp256k1::All>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(any(test, feature = "_test_utils"))]
pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
#[cfg(not(any(test, feature = "_test_utils")))]
/// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
/// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
- /// Locked *after* channel_state.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
///
/// See `PendingOutboundPayment` documentation for more info.
///
- /// Locked *after* channel_state.
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
///
/// Note that no consistency guarantees are made about the existence of a channel with the
/// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(test)]
pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
#[cfg(not(test))]
/// and some closed channels which reached a usable state prior to being closed. This is used
/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
/// active channel list on load.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
outbound_scid_aliases: Mutex<HashSet<u64>>,
/// `channel_id` -> `counterparty_node_id`.
/// We should add `counterparty_node_id`s to `MonitorEvent`s and eventually rely on them. That
/// would make this map redundant, as only the `ChannelManager::per_peer_state` is required to
/// access the channel with a given `counterparty_node_id`.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
our_network_key: SecretKey,
/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
/// new channel.
///
- /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_events: Mutex<Vec<events::Event>>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
// Transactions are evaluated as final by network mempools at the next block. However, the modules
// constituting our Lightning node might not have perfectly synchronized views of the chain. Thus,
// if the wallet module is ahead of the LDK view, allow one more block of headroom.
- // TODO: updated if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 landed and rust-bitcoin bumped.
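+ // For instance, with best height `height`, an enforceable (i.e. not sequence-disabled)
+ // block-height locktime greater than `height + 2` is rejected below as non-final.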
if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
return Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
best_block.block_hash().write(writer)?;
}
- let channel_state = self.channel_state.lock().unwrap();
- let mut unfunded_channels = 0;
- for (_, channel) in channel_state.by_id.iter() {
- if !channel.is_funding_initiated() {
- unfunded_channels += 1;
+ {
+ // Take `channel_state` lock temporarily to avoid creating a lock order that requires
+ // that the `forward_htlcs` lock is taken after `channel_state`.
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut unfunded_channels = 0;
+ for (_, channel) in channel_state.by_id.iter() {
+ if !channel.is_funding_initiated() {
+ unfunded_channels += 1;
+ }
}
- }
- ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
- for (_, channel) in channel_state.by_id.iter() {
- if channel.is_funding_initiated() {
- channel.write(writer)?;
+ ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
+ for (_, channel) in channel_state.by_id.iter() {
+ if channel.is_funding_initiated() {
+ channel.write(writer)?;
+ }
}
}
- let forward_htlcs = self.forward_htlcs.lock().unwrap();
- (forward_htlcs.len() as u64).write(writer)?;
- for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
- short_channel_id.write(writer)?;
- (pending_forwards.len() as u64).write(writer)?;
- for forward in pending_forwards {
- forward.write(writer)?;
+ {
+ let forward_htlcs = self.forward_htlcs.lock().unwrap();
+ (forward_htlcs.len() as u64).write(writer)?;
+ for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
+ short_channel_id.write(writer)?;
+ (pending_forwards.len() as u64).write(writer)?;
+ for forward in pending_forwards {
+ forward.write(writer)?;
+ }
}
}
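+ // `channel_state` is re-taken here, after the `forward_htlcs` lock above has been dropped,
+ // so the two locks are never held at the same time.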
+ let channel_state = self.channel_state.lock().unwrap();
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
(channel_state.claimable_htlcs.len() as u64).write(writer)?;
for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {