From def193d6bda4c46efddd13847612ceb58d040dc1 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 30 Nov 2022 05:47:16 +0000
Subject: [PATCH] Don't hold `channel_state` lock for entire duration of claim_funds

When `claim_funds` has to claim multiple HTLCs as a part of a single MPP
payment, it currently does so holding the `channel_state` lock for the
entire duration of the claim loop. Here we swap that for taking the lock
once for each HTLC.

This allows us to be more flexible with locks going forward, and
ultimately isn't a huge change - if our counterparty intends to
force-close a channel, us choosing to ignore it by holding the
`channel_state` lock for the duration of the claim isn't going to result
in a commitment update; it will just result in the preimage already
being in the `ChannelMonitor`.
---
 lightning/src/ln/channelmanager.rs | 38 ++++++++++++++++++------------
 1 file changed, 23 insertions(+), 15 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 95d11ebc3..b0742ab05 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -4281,10 +4281,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
[...]
 					Some((_cp_id, chan_id)) => chan_id.clone(),
[...]
@@ -4308,7 +4311,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
[...]
-				match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
+				match self.claim_funds_from_hop(self.channel_state.lock().unwrap(), htlc.prev_hop, payment_preimage) {
 					ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
 						if let msgs::ErrorAction::IgnoreError = err.err.action {
 							// We got a temporary failure updating monitor, but will claim the
@@ -4357,7 +4361,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
[...]
-					ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+					ClaimFundsFromHop::PrevHopForceClosed => {
+						// This should be incredibly rare - we checked that all the channels were
+						// open above, though as we release the lock at each loop iteration it's
+						// still possible. We should still claim the HTLC on-chain through the
+						// closed-channel-update generated in claim_funds_from_hop.
+					},
 					ClaimFundsFromHop::DuplicateClaim => {
 						// While we should never get here in most cases, if we do, it likely
 						// indicates that the HTLC was timed out some time ago and is no longer
@@ -4368,7 +4377,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
-	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
+	fn claim_funds_from_hop(&self, mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 		let chan_id = prev_hop.outpoint.to_channel_id();
-		let channel_state = &mut **channel_state_lock;
+		let channel_state = &mut *channel_state_lock;
 		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
 			match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
 				Ok(msgs_monitor_option) => {
@@ -4499,7 +4508,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
[...]
-	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+	fn claim_funds_internal(&self, channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
 				mem::drop(channel_state_lock);
@@ -4546,7 +4555,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
-				let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+				let res = self.claim_funds_from_hop(channel_state_lock, hop_data, payment_preimage);
 				let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
 				let htlc_claim_value_msat = match res {
 					ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
@@ -4560,7 +4569,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
[...]
-				mem::drop(channel_state_lock);
 				if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
 					let result: Result<(), _> = Err(err);
 					let _ = handle_error!(self, result, pk);
-- 
2.39.5
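
The core pattern in this patch is moving the `MutexGuard` into the claim
helper by value, so the lock is released at the end of every loop
iteration rather than being held across the whole MPP claim. The
following minimal, self-contained Rust sketch illustrates only that
pattern; it is not LDK code, and `Manager`, the simplified
`ChannelHolder`, `claim_one` and `claim_all` are hypothetical stand-ins
for `ChannelManager`, the real `ChannelHolder`, `claim_funds_from_hop`
and `claim_funds`.

use std::collections::HashMap;
use std::sync::{Mutex, MutexGuard};

// Stand-in for the state protected by the `channel_state` lock.
struct ChannelHolder {
	preimages_by_channel: HashMap<u32, Vec<[u8; 32]>>,
}

struct Manager {
	channel_state: Mutex<ChannelHolder>,
}

impl Manager {
	// Hypothetical analogue of `claim_funds_from_hop` after the patch: the
	// guard is taken by value, so the lock is released when we return.
	fn claim_one(&self, mut guard: MutexGuard<ChannelHolder>, chan_id: u32, preimage: [u8; 32]) {
		guard.preimages_by_channel.entry(chan_id).or_default().push(preimage);
		// `guard` is dropped here - between iterations the lock is free, so
		// e.g. a counterparty-initiated force-close can make progress.
	}

	// Analogue of the patched `claim_funds` loop: re-acquire the lock once
	// per HTLC instead of holding one guard across the whole claim.
	fn claim_all(&self, htlcs: &[(u32, [u8; 32])]) {
		for &(chan_id, preimage) in htlcs {
			let guard = self.channel_state.lock().unwrap();
			self.claim_one(guard, chan_id, preimage);
		}
	}
}

fn main() {
	let mgr = Manager {
		channel_state: Mutex::new(ChannelHolder { preimages_by_channel: HashMap::new() }),
	};
	mgr.claim_all(&[(1, [0x11; 32]), (2, [0x22; 32])]);
	println!("claimed on {} channels", mgr.channel_state.lock().unwrap().preimages_by_channel.len());
}

Because `claim_one` consumes the guard, the borrow checker enforces the
new discipline: the caller cannot keep using the lock after the helper
returns. The pre-patch `&mut MutexGuard<...>` signature instead borrowed
a guard the caller kept alive for the entire loop.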