Merge pull request #1886 from TheBlueMatt/2022-11-claim-relock
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 12 Dec 2022 03:10:38 +0000 (03:10 +0000)
committer GitHub <noreply@github.com>
Mon, 12 Dec 2022 03:10:38 +0000 (03:10 +0000)
Relock `channel_state` for each HTLC in `claim_funds` and lay the groundwork for async event generation

lightning/src/ln/channelmanager.rs

index bb99b02d9f9a90bdafee770671100a22a95e2e70,0d09ead3aca950ac0f409f28db5dfe95088a5d37..83334c77bf39d895252a222cab80ff00143fad65
@@@ -49,7 -49,6 +49,7 @@@ use crate::ln::features::InvoiceFeature
  use crate::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
  use crate::ln::msgs;
  use crate::ln::onion_utils;
 +use crate::ln::onion_utils::HTLCFailReason;
  use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
  use crate::ln::wire::Encode;
  use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient};
@@@ -277,20 -276,33 +277,12 @@@ impl HTLCSource 
        }
  }
  
 -#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 -pub(super) enum HTLCFailReason {
 -      LightningError {
 -              err: msgs::OnionErrorPacket,
 -      },
 -      Reason {
 -              failure_code: u16,
 -              data: Vec<u8>,
 -      }
 -}
 -
 -impl HTLCFailReason {
 -      pub(super) fn reason(failure_code: u16, data: Vec<u8>) -> Self {
 -              Self::Reason { failure_code, data }
 -      }
 -
 -      pub(super) fn from_failure_code(failure_code: u16) -> Self {
 -              Self::Reason { failure_code, data: Vec::new() }
 -      }
 -}
 -
  struct ReceiveError {
        err_code: u16,
        err_data: Vec<u8>,
        msg: &'static str,
  }
  
- /// Return value for claim_funds_from_hop
- enum ClaimFundsFromHop {
-       PrevHopForceClosed,
-       MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
-       Success(u64),
-       DuplicateClaim,
- }
  type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
  
  /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
@@@ -404,6 -416,36 +396,36 @@@ pub(super) enum RAACommitmentOrder 
        RevokeAndACKFirst,
  }
  
+ /// Information about a payment which is currently being claimed.
+ struct ClaimingPayment {
+       amount_msat: u64,
+       payment_purpose: events::PaymentPurpose,
+       receiver_node_id: PublicKey,
+ }
+ impl_writeable_tlv_based!(ClaimingPayment, {
+       (0, amount_msat, required),
+       (2, payment_purpose, required),
+       (4, receiver_node_id, required),
+ });
+ /// Information about claimable or being-claimed payments
+ struct ClaimablePayments {
+       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+       /// failed/claimed by the user.
+       ///
+       /// Note that no consistency guarantees are made about the channels given here actually
+       /// existing anymore by the time you go to read them!
+       ///
+       /// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
+       /// we don't get a duplicate payment.
+       claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
+       /// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
+       /// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
+       /// as an [`events::Event::PaymentClaimed`].
+       pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
+ }
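
A minimal sketch, not part of the diff, of how these two maps are meant to interact when a claim begins: the HTLCs leave `claimable_htlcs` and a `ClaimingPayment` is parked in `pending_claiming_payments` until the relevant `ChannelMonitorUpdate`s complete. The `begin_claim` helper and its signature are hypothetical; the field names come from the definitions above.

fn begin_claim(
    payments: &mut ClaimablePayments, payment_hash: PaymentHash, receiver_node_id: PublicKey,
) -> Option<Vec<ClaimableHTLC>> {
    // Take the payment out of the claimable set...
    let (purpose, htlcs) = payments.claimable_htlcs.remove(&payment_hash)?;
    // ...and record it as being-claimed so that (a) newly-arriving HTLCs for the
    // same payment hash are failed rather than accepted, and (b) the
    // `PaymentClaimed` event can be surfaced later, once the monitor update completes.
    payments.pending_claiming_payments.insert(payment_hash, ClaimingPayment {
        amount_msat: htlcs.iter().map(|htlc| htlc.value).sum(),
        payment_purpose: purpose,
        receiver_node_id,
    });
    Some(htlcs)
}
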
  // Note this is only exposed in cfg(test):
  pub(super) struct ChannelHolder<Signer: Sign> {
        pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
@@@ -421,6 -463,16 +443,16 @@@ enum BackgroundEvent 
        ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
  }
  
+ pub(crate) enum MonitorUpdateCompletionAction {
+       /// Indicates that a payment ultimately destined for us was claimed and we should emit an
+       /// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
+       /// this payment. Note that the deduplication is only best-effort; on restart, such a
+       /// duplicate event may still be generated.
+       PaymentClaimed { payment_hash: PaymentHash },
+       /// Indicates an [`events::Event`] should be surfaced to the user.
+       EmitEvent { event: events::Event },
+ }
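
A hedged sketch of how a completion action might be consumed once its monitor update finishes. The handler name, its parameters, and the exact `events::Event::PaymentClaimed` field set are assumptions, not code from this diff; the point is that removing the `pending_claiming_payments` entry is what gates the event, keeping emission (best-effort) idempotent.

fn handle_completion_action(
    payments: &mut ClaimablePayments, pending_events: &mut Vec<events::Event>,
    action: MonitorUpdateCompletionAction,
) {
    match action {
        MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
            // Only the caller that actually removes the pending-claim entry emits
            // the event; per the docs above, a restart may still duplicate it.
            if let Some(claiming) = payments.pending_claiming_payments.remove(&payment_hash) {
                pending_events.push(events::Event::PaymentClaimed {
                    receiver_node_id: Some(claiming.receiver_node_id),
                    payment_hash,
                    amount_msat: claiming.amount_msat,
                    purpose: claiming.payment_purpose,
                });
            }
        },
        MonitorUpdateCompletionAction::EmitEvent { event } => pending_events.push(event),
    }
}
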
  /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
  /// the latest Init features we heard from the peer.
  struct PeerState {
@@@ -679,7 -731,7 +711,7 @@@ pub type SimpleRefChannelManager<'a, 'b
  //  |
  //  |__`pending_inbound_payments`
  //  |   |
- //  |   |__`claimable_htlcs`
+ //  |   |__`claimable_payments`
  //  |   |
  //  |   |__`pending_outbound_payments`
  //  |       |
@@@ -767,14 -819,11 +799,11 @@@ pub struct ChannelManager<M: Deref, T: 
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
  
-       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
-       /// failed/claimed by the user.
-       ///
-       /// Note that, no consistency guarantees are made about the channels given here actually
-       /// existing anymore by the time you go to read them!
+       /// The sets of payments which are claimable or currently being claimed. See
+       /// [`ClaimablePayments`]' individual field docs for more info.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
-       claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+       claimable_payments: Mutex<ClaimablePayments>,
  
        /// The set of outbound SCID aliases across all our channels, including unconfirmed channels
        /// and some closed channels which reached a usable state prior to being closed. This is used
@@@ -1580,7 -1629,7 +1609,7 @@@ impl<M: Deref, T: Deref, K: Deref, F: D
                        pending_inbound_payments: Mutex::new(HashMap::new()),
                        pending_outbound_payments: Mutex::new(HashMap::new()),
                        forward_htlcs: Mutex::new(HashMap::new()),
-                       claimable_htlcs: Mutex::new(HashMap::new()),
+                       claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs: HashMap::new(), pending_claiming_payments: HashMap::new() }),
                        pending_intercepted_htlcs: Mutex::new(HashMap::new()),
                        id_to_peer: Mutex::new(HashMap::new()),
                        short_to_chan_info: FairRwLock::new(HashMap::new()),
                // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
                // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
                // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
 -              if (hop_data.outgoing_cltv_value as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1  {
 +              let current_height: u32 = self.best_block.read().unwrap().height();
 +              if (hop_data.outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
 +                      let mut err_data = Vec::with_capacity(12);
 +                      err_data.extend_from_slice(&amt_msat.to_be_bytes());
 +                      err_data.extend_from_slice(&current_height.to_be_bytes());
                        return Err(ReceiveError {
 -                              err_code: 17,
 -                              err_data: Vec::new(),
 +                              err_code: 0x4000 | 15, err_data,
                                msg: "The final CLTV expiry is too soon to handle",
                        });
                }
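
The new error above is BOLT 4's `incorrect_or_unknown_payment_details` (PERM | 15, i.e. 0x4000 | 15), whose failure data is an 8-byte big-endian `htlc_msat` followed by a 4-byte big-endian block `height`, hence the 12-byte capacity. Restated as a standalone helper (the function name is illustrative):

fn incorrect_or_unknown_payment_err_data(amt_msat: u64, best_block_height: u32) -> Vec<u8> {
    let mut err_data = Vec::with_capacity(12);
    err_data.extend_from_slice(&amt_msat.to_be_bytes());           // htlc_msat, 8 bytes
    err_data.extend_from_slice(&best_block_height.to_be_bytes());  // height, 4 bytes
    debug_assert_eq!(err_data.len(), 12);
    err_data
}
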
                                        return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
 -                                              reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
 +                                              reason: HTLCFailReason::reason($err_code, $data.to_vec())
 +                                                      .get_encrypted_failure_packet(&shared_secret, &None),
                                        }));
                                }
                        }
                        // with a short_channel_id of 0. This is important as various things later assume
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
 -                              if let Some((err, code, chan_update)) = loop {
 +                              if let Some((err, mut code, chan_update)) = loop {
                                        let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
                                        let mut channel_state = self.channel_state.lock().unwrap();
                                        let forwarding_id_opt = match id_option {
                                                }
                                                chan_update_opt
                                        } else {
 -                                              if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
 +                                              if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
 +                                                      // We really should set `incorrect_cltv_expiry` here, but as we're not
 +                                                      // forwarding over a real channel we can't generate a channel_update
 +                                                      // for it. Instead we just return a generic temporary_node_failure.
                                                        break Some((
                                                                "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
 -                                                              0x1000 | 13, None,
 +                                                              0x2000 | 2, None,
                                                        ));
                                                }
                                                None
                                                (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
                                                msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
                                                chan_update.write(&mut res).expect("Writes cannot fail");
 +                                      } else if code & 0x1000 == 0x1000 {
 +                                              // If we're trying to return an error that requires a `channel_update` but
 +                                              // we're forwarding to a phantom or intercept "channel" (i.e. cannot
 +                                              // generate an update), just use the generic "temporary_node_failure"
 +                                              // instead.
 +                                              code = 0x2000 | 2;
                                        }
                                        return_err!(err, code, &res.0[..]);
                                }
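
For context on the flag arithmetic above: BOLT 4 failure codes combine high-bit flags with a small code, and any `UPDATE`-flagged code must ship with a `channel_update`, which phantom and intercept "channels" cannot produce. A sketch under that reading, with illustrative constant and function names:

const PERM: u16 = 0x4000;   // permanent failure
const NODE: u16 = 0x2000;   // node-level failure
const UPDATE: u16 = 0x1000; // failure must include a `channel_update`

/// Downgrade an UPDATE-flagged code (e.g. UPDATE | 7, temporary_channel_failure)
/// to NODE | 2, temporary_node_failure, when no `channel_update` is available.
fn downgrade_without_update(code: u16, have_channel_update: bool) -> u16 {
    if code & UPDATE == UPDATE && !have_channel_update { NODE | 2 } else { code }
}
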
                let mut new_events = Vec::new();
                let mut failed_forwards = Vec::new();
                let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
 -              let mut handle_errors = Vec::new();
                {
                        let mut forward_htlcs = HashMap::new();
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
                                                        continue;
                                                },
                                                hash_map::Entry::Occupied(mut chan) => {
 -                                                      let mut add_htlc_msgs = Vec::new();
 -                                                      let mut fail_htlc_msgs = Vec::new();
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                                        // Phantom payments are only PendingHTLCRouting::Receive.
                                                                                        phantom_shared_secret: None,
                                                                                });
 -                                                                              match chan.get_mut().send_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
 -                                                                                      Err(e) => {
 -                                                                                              if let ChannelError::Ignore(msg) = e {
 -                                                                                                      log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
 -                                                                                              } else {
 -                                                                                                      panic!("Stated return value requirements in send_htlc() were not met");
 -                                                                                              }
 -                                                                                              let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
 -                                                                                              failed_forwards.push((htlc_source, payment_hash,
 -                                                                                                      HTLCFailReason::reason(failure_code, data),
 -                                                                                                      HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
 -                                                                                              ));
 -                                                                                              continue;
 -                                                                                      },
 -                                                                                      Ok(update_add) => {
 -                                                                                              match update_add {
 -                                                                                                      Some(msg) => { add_htlc_msgs.push(msg); },
 -                                                                                                      None => {
 -                                                                                                              // Nothing to do here...we're waiting on a remote
 -                                                                                                              // revoke_and_ack before we can add anymore HTLCs. The Channel
 -                                                                                                              // will automatically handle building the update_add_htlc and
 -                                                                                                              // commitment_signed messages when we can.
 -                                                                                                              // TODO: Do some kind of timer to set the channel as !is_live()
 -                                                                                                              // as we don't really want others relying on us relaying through
 -                                                                                                              // this channel currently :/.
 -                                                                                                      }
 -                                                                                              }
 +                                                                              if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
 +                                                                                      payment_hash, outgoing_cltv_value, htlc_source.clone(),
 +                                                                                      onion_packet, &self.logger)
 +                                                                              {
 +                                                                                      if let ChannelError::Ignore(msg) = e {
 +                                                                                              log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
 +                                                                                      } else {
 +                                                                                              panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
 +                                                                                      let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
 +                                                                                      failed_forwards.push((htlc_source, payment_hash,
 +                                                                                              HTLCFailReason::reason(failure_code, data),
 +                                                                                              HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
 +                                                                                      ));
 +                                                                                      continue;
                                                                                }
                                                                        },
                                                                        HTLCForwardInfo::AddHTLC { .. } => {
                                                                        },
                                                                        HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
                                                                                log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
 -                                                                              match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
 -                                                                                      Err(e) => {
 -                                                                                              if let ChannelError::Ignore(msg) = e {
 -                                                                                                      log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
 -                                                                                              } else {
 -                                                                                                      panic!("Stated return value requirements in get_update_fail_htlc() were not met");
 -                                                                                              }
 -                                                                                              // fail-backs are best-effort, we probably already have one
 -                                                                                              // pending, and if not that's OK, if not, the channel is on
 -                                                                                              // the chain and sending the HTLC-Timeout is their problem.
 -                                                                                              continue;
 -                                                                                      },
 -                                                                                      Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
 -                                                                                      Ok(None) => {
 -                                                                                              // Nothing to do here...we're waiting on a remote
 -                                                                                              // revoke_and_ack before we can update the commitment
 -                                                                                              // transaction. The Channel will automatically handle
 -                                                                                              // building the update_fail_htlc and commitment_signed
 -                                                                                              // messages when we can.
 -                                                                                              // We don't need any kind of timer here as they should fail
 -                                                                                              // the channel onto the chain if they can't get our
 -                                                                                              // update_fail_htlc in time, it's not our problem.
 +                                                                              if let Err(e) = chan.get_mut().queue_fail_htlc(
 +                                                                                      htlc_id, err_packet, &self.logger
 +                                                                              ) {
 +                                                                                      if let ChannelError::Ignore(msg) = e {
 +                                                                                              log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
 +                                                                                      } else {
 +                                                                                              panic!("Stated return value requirements in queue_fail_htlc() were not met");
                                                                                        }
 +                                                                                      // fail-backs are best-effort, we probably already have one
 +                                                                                      // pending, and if not that's OK; if the channel ends up on
 +                                                                                      // the chain, sending the HTLC-Timeout is their problem.
 +                                                                                      continue;
                                                                                }
                                                                        },
                                                                }
                                                        }
 -
 -                                                      if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
 -                                                              let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
 -                                                                      Ok(res) => res,
 -                                                                      Err(e) => {
 -                                                                              // We surely failed send_commitment due to bad keys, in that case
 -                                                                              // close channel and then send error message to peer.
 -                                                                              let counterparty_node_id = chan.get().get_counterparty_node_id();
 -                                                                              let err: Result<(), _>  = match e {
 -                                                                                      ChannelError::Ignore(_) | ChannelError::Warn(_) => {
 -                                                                                              panic!("Stated return value requirements in send_commitment() were not met");
 -                                                                                      }
 -                                                                                      ChannelError::Close(msg) => {
 -                                                                                              log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
 -                                                                                              let mut channel = remove_channel!(self, chan);
 -                                                                                              // ChannelClosed event is generated by handle_error for us.
 -                                                                                              Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
 -                                                                                      },
 -                                                                              };
 -                                                                              handle_errors.push((counterparty_node_id, err));
 -                                                                              continue;
 -                                                                      }
 -                                                              };
 -                                                              match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
 -                                                                      ChannelMonitorUpdateStatus::Completed => {},
 -                                                                      e => {
 -                                                                              handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
 -                                                                              continue;
 -                                                                      }
 -                                                              }
 -                                                              log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
 -                                                                      add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
 -                                                              channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 -                                                                      node_id: chan.get().get_counterparty_node_id(),
 -                                                                      updates: msgs::CommitmentUpdate {
 -                                                                              update_add_htlcs: add_htlc_msgs,
 -                                                                              update_fulfill_htlcs: Vec::new(),
 -                                                                              update_fail_htlcs: fail_htlc_msgs,
 -                                                                              update_fail_malformed_htlcs: Vec::new(),
 -                                                                              update_fee: None,
 -                                                                              commitment_signed: commitment_msg,
 -                                                                      },
 -                                                              });
 -                                                      }
                                                }
                                        }
                                } else {
  
                                                                macro_rules! check_total_value {
                                                                        ($payment_data: expr, $payment_preimage: expr) => {{
 -                                                                              let mut payment_received_generated = false;
 +                                                                              let mut payment_claimable_generated = false;
                                                                                let purpose = || {
                                                                                        events::PaymentPurpose::InvoicePayment {
                                                                                                payment_preimage: $payment_preimage,
                                                                                                payment_secret: $payment_data.payment_secret,
                                                                                        }
                                                                                };
-                                                                               let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
-                                                                               let (_, htlcs) = claimable_htlcs.entry(payment_hash)
+                                                                               let mut claimable_payments = self.claimable_payments.lock().unwrap();
+                                                                               if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
+                                                                                       continue
+                                                                               }
+                                                                               let (_, htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
                                                                                        .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                via_channel_id: Some(prev_channel_id),
                                                                                                via_user_channel_id: Some(prev_user_channel_id),
                                                                                        });
 -                                                                                      payment_received_generated = true;
 +                                                                                      payment_claimable_generated = true;
                                                                                } else {
                                                                                        // Nothing to do - we haven't reached the total
                                                                                        // payment value yet, wait until we receive more
                                                                                        // MPP parts.
                                                                                        htlcs.push(claimable_htlc);
                                                                                }
 -                                                                              payment_received_generated
 +                                                                              payment_claimable_generated
                                                                        }}
                                                                }
  
                                                                                                check_total_value!(payment_data, payment_preimage);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
-                                                                                               match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
+                                                                                               let mut claimable_payments = self.claimable_payments.lock().unwrap();
+                                                                                               if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
+                                                                                                       fail_htlc!(claimable_htlc, payment_hash);
+                                                                                                       continue
+                                                                                               }
+                                                                                               match claimable_payments.claimable_htlcs.entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
                                                                                                                let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
                                                                                                                e.insert((purpose.clone(), vec![claimable_htlc]));
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
 -                                                                                      let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
 -                                                                                      if payment_received_generated {
 +                                                                                      let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
 +                                                                                      if payment_claimable_generated {
                                                                                                inbound_payment.remove_entry();
                                                                                        }
                                                                                }
                }
                self.forward_htlcs(&mut phantom_receives);
  
 -              for (counterparty_node_id, err) in handle_errors.drain(..) {
 -                      let _ = handle_error!(self, err, counterparty_node_id);
 -              }
 +              // Freeing the holding cell here is relatively redundant - in practice we'll do it when we
 +              // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's
 +              // nice to do the work now if we can rather than while we're trying to get messages in the
 +              // network stack.
 +              self.check_free_holding_cells();
  
                if new_events.is_empty() { return }
                let mut events = self.pending_events.lock().unwrap();
                self.process_background_events();
        }
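
The forwarding changes above replace inline `update_add_htlc`/`update_fail_htlc` construction plus `send_commitment` with queueing into each channel's holding cell, drained later in one pass by `check_free_holding_cells`. A self-contained sketch of that pattern, using illustrative types rather than LDK's:

enum QueuedUpdate { Add /* an update_add_htlc */, Fail /* an update_fail_htlc */ }

#[derive(Default)]
struct HoldingCell { queued: Vec<QueuedUpdate> }

impl HoldingCell {
    /// Call sites only queue; no commitment is built here (cf. `queue_add_htlc`
    /// and `queue_fail_htlc` in the diff).
    fn queue(&mut self, upd: QueuedUpdate) { self.queued.push(upd); }

    /// One later sweep batches everything into a single commitment update
    /// (cf. `check_free_holding_cells`). Returns whether anything was freed,
    /// which the timer path uses to decide whether to persist.
    fn free(&mut self) -> bool {
        if self.queued.is_empty() { return false; }
        let _updates: Vec<QueuedUpdate> = self.queued.drain(..).collect();
        // ...build one commitment_signed covering all drained updates...
        true
    }
}
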
  
 -      fn update_channel_fee(&self, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
 -              if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
 +      fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> NotifyOption {
 +              if !chan.is_outbound() { return NotifyOption::SkipPersist; }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
                                log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
 -                      return (true, NotifyOption::SkipPersist, Ok(()));
 +                      return NotifyOption::SkipPersist;
                }
                if !chan.is_live() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
                                log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
 -                      return (true, NotifyOption::SkipPersist, Ok(()));
 +                      return NotifyOption::SkipPersist;
                }
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
                        log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
  
 -              let mut retain_channel = true;
 -              let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
 -                      Ok(res) => Ok(res),
 -                      Err(e) => {
 -                              let (drop, res) = convert_chan_err!(self, e, chan, chan_id);
 -                              if drop { retain_channel = false; }
 -                              Err(res)
 -                      }
 -              };
 -              let ret_err = match res {
 -                      Ok(Some((update_fee, commitment_signed, monitor_update))) => {
 -                              match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
 -                                      ChannelMonitorUpdateStatus::Completed => {
 -                                              pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 -                                                      node_id: chan.get_counterparty_node_id(),
 -                                                      updates: msgs::CommitmentUpdate {
 -                                                              update_add_htlcs: Vec::new(),
 -                                                              update_fulfill_htlcs: Vec::new(),
 -                                                              update_fail_htlcs: Vec::new(),
 -                                                              update_fail_malformed_htlcs: Vec::new(),
 -                                                              update_fee: Some(update_fee),
 -                                                              commitment_signed,
 -                                                      },
 -                                              });
 -                                              Ok(())
 -                                      },
 -                                      e => {
 -                                              let (res, drop) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
 -                                              if drop { retain_channel = false; }
 -                                              res
 -                                      }
 -                              }
 -                      },
 -                      Ok(None) => Ok(()),
 -                      Err(e) => Err(e),
 -              };
 -              (retain_channel, NotifyOption::DoPersist, ret_err)
 +              chan.queue_update_fee(new_feerate, &self.logger);
 +              NotifyOption::DoPersist
        }
  
        #[cfg(fuzzing)]
  
                        let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
  
 -                      let mut handle_errors = Vec::new();
 -                      {
 -                              let mut channel_state_lock = self.channel_state.lock().unwrap();
 -                              let channel_state = &mut *channel_state_lock;
 -                              let pending_msg_events = &mut channel_state.pending_msg_events;
 -                              channel_state.by_id.retain(|chan_id, chan| {
 -                                      let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
 -                                      if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
 -                                      if err.is_err() {
 -                                              handle_errors.push(err);
 -                                      }
 -                                      retain_channel
 -                              });
 +                      let mut channel_state = self.channel_state.lock().unwrap();
 +                      for (chan_id, chan) in channel_state.by_id.iter_mut() {
 +                              let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
 +                              if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                        }
  
                        should_persist
  
                        let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
  
 -                      let mut handle_errors = Vec::new();
 +                      let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
                                channel_state.by_id.retain(|chan_id, chan| {
 -                                      let counterparty_node_id = chan.get_counterparty_node_id();
 -                                      let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
 +                                      let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
 -                                      if err.is_err() {
 -                                              handle_errors.push((err, counterparty_node_id));
 -                                      }
 -                                      if !retain_channel { return false; }
  
                                        if let Err(e) = chan.timer_check_closing_negotiation_progress() {
                                                let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
                                });
                        }
  
-                       self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+                       self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                if htlcs.is_empty() {
                                        // This should be unreachable
                                        debug_assert!(false);
  
                        self.remove_stale_resolved_payments();
  
 +                      // Technically we don't need to do this here, but if we have holding cell entries in a
 +                      // channel that need freeing, it's better to do that here and block a background task
 +                      // than block the message queueing pipeline.
 +                      if self.check_free_holding_cells() {
 +                              should_persist = NotifyOption::DoPersist;
 +                      }
 +
                        should_persist
                });
        }
        pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
-               let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
+               let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
                if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
                                let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
                                } else { None };
                                log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
  
 -                              let path_failure = match &onion_error {
 -                                      &HTLCFailReason::LightningError { ref err } => {
 +                              let path_failure = {
  #[cfg(test)]
 -                                              let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 +                                      let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source);
  #[cfg(not(test))]
 -                                              let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 -
 -                                              if self.payment_is_probe(payment_hash, &payment_id) {
 -                                                      if !payment_retryable {
 -                                                              events::Event::ProbeSuccessful {
 -                                                                      payment_id: *payment_id,
 -                                                                      payment_hash: payment_hash.clone(),
 -                                                                      path: path.clone(),
 -                                                              }
 -                                                      } else {
 -                                                              events::Event::ProbeFailed {
 -                                                                      payment_id: *payment_id,
 -                                                                      payment_hash: payment_hash.clone(),
 -                                                                      path: path.clone(),
 -                                                                      short_channel_id,
 -                                                              }
 -                                                      }
 -                                              } else {
 -                                                      // TODO: If we decided to blame ourselves (or one of our channels) in
 -                                                      // process_onion_failure we should close that channel as it implies our
 -                                                      // next-hop is needlessly blaming us!
 -                                                      if let Some(scid) = short_channel_id {
 -                                                              retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
 -                                                      }
 -                                                      events::Event::PaymentPathFailed {
 -                                                              payment_id: Some(*payment_id),
 -                                                              payment_hash: payment_hash.clone(),
 -                                                              payment_failed_permanently: !payment_retryable,
 -                                                              network_update,
 -                                                              all_paths_failed,
 -                                                              path: path.clone(),
 -                                                              short_channel_id,
 -                                                              retry,
 -                                                              #[cfg(test)]
 -                                                              error_code: onion_error_code,
 -                                                              #[cfg(test)]
 -                                                              error_data: onion_error_data
 -                                                      }
 -                                              }
 -                                      },
 -                                      &HTLCFailReason::Reason {
 -#[cfg(test)]
 -                                                      ref failure_code,
 -#[cfg(test)]
 -                                                      ref data,
 -                                                      .. } => {
 -                                              // we get a fail_malformed_htlc from the first hop
 -                                              // TODO: We'd like to generate a NetworkUpdate for temporary
 -                                              // failures here, but that would be insufficient as find_route
 -                                              // generally ignores its view of our own channels as we provide them via
 -                                              // ChannelDetails.
 -                                              // TODO: For non-temporary failures, we really should be closing the
 -                                              // channel here as we apparently can't relay through them anyway.
 -                                              let scid = path.first().unwrap().short_channel_id;
 -                                              retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
 -
 -                                              if self.payment_is_probe(payment_hash, &payment_id) {
 -                                                      events::Event::ProbeFailed {
 +                                      let (network_update, short_channel_id, payment_retryable, _, _) = onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source);
 +
 +                                      if self.payment_is_probe(payment_hash, &payment_id) {
 +                                              if !payment_retryable {
 +                                                      events::Event::ProbeSuccessful {
                                                                payment_id: *payment_id,
                                                                payment_hash: payment_hash.clone(),
                                                                path: path.clone(),
 -                                                              short_channel_id: Some(scid),
                                                        }
                                                } else {
 -                                                      events::Event::PaymentPathFailed {
 -                                                              payment_id: Some(*payment_id),
 +                                                      events::Event::ProbeFailed {
 +                                                              payment_id: *payment_id,
                                                                payment_hash: payment_hash.clone(),
 -                                                              payment_failed_permanently: false,
 -                                                              network_update: None,
 -                                                              all_paths_failed,
                                                                path: path.clone(),
 -                                                              short_channel_id: Some(scid),
 -                                                              retry,
 -#[cfg(test)]
 -                                                              error_code: Some(*failure_code),
 -#[cfg(test)]
 -                                                              error_data: Some(data.clone()),
 +                                                              short_channel_id,
                                                        }
                                                }
 +                                      } else {
 +                                              // TODO: If we decided to blame ourselves (or one of our channels) in
 +                                              // process_onion_failure we should close that channel as it implies our
 +                                              // next-hop is needlessly blaming us!
 +                                              if let Some(scid) = short_channel_id {
 +                                                      retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
 +                                              }
 +                                              events::Event::PaymentPathFailed {
 +                                                      payment_id: Some(*payment_id),
 +                                                      payment_hash: payment_hash.clone(),
 +                                                      payment_failed_permanently: !payment_retryable,
 +                                                      network_update,
 +                                                      all_paths_failed,
 +                                                      path: path.clone(),
 +                                                      short_channel_id,
 +                                                      retry,
 +                                                      #[cfg(test)]
 +                                                      error_code: onion_error_code,
 +                                                      #[cfg(test)]
 +                                                      error_data: onion_error_data
 +                                              }
                                        }
                                };
                                let mut pending_events = self.pending_events.lock().unwrap();
                                if let Some(ev) = full_failure_ev { pending_events.push(ev); }
                        },
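
A note on the probe handling above: a probe is sent with a payment hash the recipient cannot claim, so it can never be fulfilled; a non-retryable failure therefore means the probe traversed the whole path and is surfaced as `ProbeSuccessful`. Condensed to a sketch (names here are illustrative, not LDK API):

    // Illustrative only: the probe-outcome decision in miniature.
    fn probe_outcome(payment_retryable: bool) -> &'static str {
        // A probe's payment hash is unknown to the recipient, so it is always failed
        // back. A *non-retryable* failure (e.g. incorrect_or_unknown_payment_details
        // from the final node) means the probe reached its destination.
        if !payment_retryable { "ProbeSuccessful" } else { "ProbeFailed" }
    }

    fn main() { assert_eq!(probe_outcome(false), "ProbeSuccessful"); }
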
                        HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
 -                              let err_packet = match onion_error {
 -                                      HTLCFailReason::Reason { ref failure_code, ref data } => {
 -                                              log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
 -                                              if let Some(phantom_ss) = phantom_shared_secret {
 -                                                      let phantom_packet = onion_utils::build_failure_packet(phantom_ss, *failure_code, &data[..]).encode();
 -                                                      let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(phantom_ss, &phantom_packet);
 -                                                      onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
 -                                              } else {
 -                                                      let packet = onion_utils::build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]).encode();
 -                                                      onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &packet)
 -                                              }
 -                                      },
 -                                      HTLCFailReason::LightningError { err } => {
 -                                              log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
 -                                              onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &err.data)
 -                                      }
 -                              };
 +                              log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
 +                              let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
  
                                let mut forward_event = None;
                                let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
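
The two removed branches above were folded into `HTLCFailReason::get_encrypted_failure_packet` (the type now lives in `onion_utils`, per the new import). Reconstructed from those branches, the helper plausibly looks like the sketch below; exact names, variants and visibility may differ in the upstream implementation:

    // Sketch reconstructed from the removed branches; not necessarily the exact
    // upstream code in onion_utils.
    impl HTLCFailReason {
        pub(super) fn get_encrypted_failure_packet(&self, incoming_packet_shared_secret: &[u8; 32],
            phantom_shared_secret: &Option<[u8; 32]>) -> msgs::OnionErrorPacket
        {
            match self {
                HTLCFailReason::Reason { failure_code, data } => {
                    if let Some(phantom_ss) = phantom_shared_secret {
                        // Phantom receive: add the phantom node's failure layer first...
                        let phantom_packet = onion_utils::build_failure_packet(
                            phantom_ss, *failure_code, &data[..]).encode();
                        let encrypted_phantom = onion_utils::encrypt_failure_packet(
                            phantom_ss, &phantom_packet);
                        // ...then our real channel's layer on top.
                        onion_utils::encrypt_failure_packet(
                            incoming_packet_shared_secret, &encrypted_phantom.data[..])
                    } else {
                        let packet = onion_utils::build_failure_packet(
                            incoming_packet_shared_secret, *failure_code, &data[..]).encode();
                        onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &packet)
                    }
                },
                HTLCFailReason::LightningError { err } => {
                    // Pre-built error from downstream: just wrap it in our layer.
                    onion_utils::encrypt_failure_packet(incoming_packet_shared_secret, &err.data)
                },
            }
        }
    }
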
        /// [`process_pending_events`]: EventsProvider::process_pending_events
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
-       /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
        pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
  
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
-               let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
-               if let Some((payment_purpose, mut sources)) = removed_source {
-                       assert!(!sources.is_empty());
-                       // If we are claiming an MPP payment, we have to take special care to ensure that each
-                       // channel exists before claiming all of the payments (inside one lock).
-                       // Note that channel existence is sufficient as we should always get a monitor update
-                       // which will take care of the real HTLC claim enforcement.
-                       //
-                       // If we find an HTLC which we would need to claim but for which we do not have a
-                       // channel, we will fail all parts of the MPP payment. While we could wait and see if
-                       // the sender retries the already-failed path(s), it should be a pretty rare case where
-                       // we got all the HTLCs and then a channel closed while we were waiting for the user to
-                       // provide the preimage, so worrying too much about the optimal handling isn't worth
-                       // it.
-                       let mut claimable_amt_msat = 0;
-                       let mut expected_amt_msat = None;
-                       let mut valid_mpp = true;
-                       let mut errs = Vec::new();
-                       let mut claimed_any_htlcs = false;
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-                       let mut receiver_node_id = Some(self.our_network_pubkey);
-                       for htlc in sources.iter() {
-                               let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
-                                       Some((_cp_id, chan_id)) => chan_id.clone(),
-                                       None => {
-                                               valid_mpp = false;
+               let mut sources = {
+                       let mut claimable_payments = self.claimable_payments.lock().unwrap();
+                       if let Some((payment_purpose, sources)) = claimable_payments.claimable_htlcs.remove(&payment_hash) {
+                               let mut receiver_node_id = self.our_network_pubkey;
+                               for htlc in sources.iter() {
+                                       if htlc.prev_hop.phantom_shared_secret.is_some() {
+                                               let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
+                                                       .expect("Failed to get node_id for phantom node recipient");
+                                               receiver_node_id = phantom_pubkey;
                                                break;
                                        }
-                               };
+                               }
  
-                               if let None = channel_state.by_id.get(&chan_id) {
-                                       valid_mpp = false;
-                                       break;
+                               let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
+                                       ClaimingPayment {
+                                               amount_msat: sources.iter().map(|source| source.value).sum(),
+                                               payment_purpose, receiver_node_id,
+                                       });
+                               if dup_purpose.is_some() {
+                                       debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
+                                       log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
+                                               log_bytes!(payment_hash.0));
                                }
+                               sources
+                       } else { return; }
+               };
+               debug_assert!(!sources.is_empty());
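
The `dup_purpose` check above leans on the standard-library contract that `HashMap::insert` returns the previous value for the key; a `Some` return therefore signals a claim already in flight for this payment hash. In miniature:

    use std::collections::HashMap;

    fn main() {
        let mut pending: HashMap<&str, u64> = HashMap::new();
        assert!(pending.insert("payment_hash", 100_000).is_none()); // first claim registered
        assert!(pending.insert("payment_hash", 100_000).is_some()); // duplicate detected
    }
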
  
-                               if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
-                                       log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
-                                       debug_assert!(false);
+               // If we are claiming an MPP payment, we check that all channels which contain a claimable
+               // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
+               // we're claiming (or even after we claim, before the commitment update dance completes),
+               // it should be a relatively rare race, and we'd rather just reject the payment than
+               // claim HTLCs that would require us to go on-chain (and lose the on-chain fee to do so).
+               //
+               // Note that we'll still always get our funds - as long as the generated
+               // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
+               //
+               // If we find an HTLC which we would need to claim but for which we do not have a
+               // channel, we will fail all parts of the MPP payment. While we could wait and see if
+               // the sender retries the already-failed path(s), it should be a pretty rare case where
+               // we got all the HTLCs and then a channel closed while we were waiting for the user to
+               // provide the preimage, so worrying too much about the optimal handling isn't worth
+               // it.
+               let mut claimable_amt_msat = 0;
+               let mut expected_amt_msat = None;
+               let mut valid_mpp = true;
+               let mut errs = Vec::new();
+               let mut channel_state = Some(self.channel_state.lock().unwrap());
+               for htlc in sources.iter() {
+                       let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
+                               Some((_cp_id, chan_id)) => chan_id.clone(),
+                               None => {
                                        valid_mpp = false;
                                        break;
                                }
-                               expected_amt_msat = Some(htlc.total_msat);
-                               if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
-                                       // We don't currently support MPP for spontaneous payments, so just check
-                                       // that there's one payment here and move on.
-                                       if sources.len() != 1 {
-                                               log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
-                                               debug_assert!(false);
-                                               valid_mpp = false;
-                                               break;
-                                       }
-                               }
-                               let phantom_shared_secret = htlc.prev_hop.phantom_shared_secret;
-                               if phantom_shared_secret.is_some() {
-                                       let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
-                                               .expect("Failed to get node_id for phantom node recipient");
-                                       receiver_node_id = Some(phantom_pubkey)
-                               }
+                       };
  
-                               claimable_amt_msat += htlc.value;
-                       }
-                       if sources.is_empty() || expected_amt_msat.is_none() {
-                               log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
-                               return;
+                       if channel_state.as_ref().unwrap().by_id.get(&chan_id).is_none() {
+                               valid_mpp = false;
+                               break;
                        }
-                       if claimable_amt_msat != expected_amt_msat.unwrap() {
-                               log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
-                                       expected_amt_msat.unwrap(), claimable_amt_msat);
-                               return;
+                       if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
+                               log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
+                               debug_assert!(false);
+                               valid_mpp = false;
+                               break;
                        }
-                       if valid_mpp {
-                               for htlc in sources.drain(..) {
-                                       match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
-                                               ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
-                                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
-                                                               // We got a temporary failure updating monitor, but will claim the
-                                                               // HTLC when the monitor updating is restored (or on chain).
-                                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
-                                                               claimed_any_htlcs = true;
-                                                       } else { errs.push((pk, err)); }
-                                               },
-                                               ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
-                                               ClaimFundsFromHop::DuplicateClaim => {
-                                                       // While we should never get here in most cases, if we do, it likely
-                                                       // indicates that the HTLC was timed out some time ago and is no longer
-                                                       // available to be claimed. Thus, it does not make sense to set
-                                                       // `claimed_any_htlcs`.
-                                               },
-                                               ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
-                                       }
+                       expected_amt_msat = Some(htlc.total_msat);
+                       if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
+                               // We don't currently support MPP for spontaneous payments, so just check
+                               // that there's one payment here and move on.
+                               if sources.len() != 1 {
+                                       log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
+                                       debug_assert!(false);
+                                       valid_mpp = false;
+                                       break;
                                }
                        }
-                       mem::drop(channel_state_lock);
-                       if !valid_mpp {
-                               for htlc in sources.drain(..) {
-                                       let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
-                                       htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
-                                       let source = HTLCSource::PreviousHopData(htlc.prev_hop);
-                                       let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
-                                       let receiver = HTLCDestination::FailedPayment { payment_hash };
-                                       self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
+                       claimable_amt_msat += htlc.value;
+               }
+               if sources.is_empty() || expected_amt_msat.is_none() {
+                       mem::drop(channel_state);
+                       self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+                       log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
+                       return;
+               }
+               if claimable_amt_msat != expected_amt_msat.unwrap() {
+                       mem::drop(channel_state);
+                       self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+                       log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
+                               expected_amt_msat.unwrap(), claimable_amt_msat);
+                       return;
+               }
+               if valid_mpp {
+                       for htlc in sources.drain(..) {
+                               if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+                               if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), htlc.prev_hop,
+                                       payment_preimage,
+                                       |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+                               {
+                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
+                                               // We got a temporary failure updating monitor, but will claim the
+                                               // HTLC when the monitor updating is restored (or on chain).
+                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+                                       } else { errs.push((pk, err)); }
                                }
                        }
-                       if claimed_any_htlcs {
-                               self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
-                                       receiver_node_id,
-                                       payment_hash,
-                                       purpose: payment_purpose,
-                                       amount_msat: claimable_amt_msat,
-                               });
+               }
+               mem::drop(channel_state);
+               if !valid_mpp {
+                       for htlc in sources.drain(..) {
+                               let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
+                               htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+                               let source = HTLCSource::PreviousHopData(htlc.prev_hop);
+                               let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
+                               let receiver = HTLCDestination::FailedPayment { payment_hash };
+                               self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                        }
+                       self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+               }
  
-                       // Now we can handle any errors which were generated.
-                       for (counterparty_node_id, err) in errs.drain(..) {
-                               let res: Result<(), _> = Err(err);
-                               let _ = handle_error!(self, res, counterparty_node_id);
-                       }
+               // Now we can handle any errors which were generated.
+               for (counterparty_node_id, err) in errs.drain(..) {
+                       let res: Result<(), _> = Err(err);
+                       let _ = handle_error!(self, res, counterparty_node_id);
                }
        }
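
Boiled down, the validation loop in `claim_funds` above enforces two invariants on an MPP claim: every part must advertise the same `total_msat`, and the parts we hold must sum to exactly that total. A standalone sketch with simplified types (not LDK API):

    struct Part { value_msat: u64, total_msat: u64 }

    fn mpp_parts_complete(parts: &[Part]) -> bool {
        let mut expected_total = None;
        let mut claimable_msat = 0u64;
        for part in parts {
            // All parts must agree on the payment's total...
            if expected_total.is_some() && expected_total != Some(part.total_msat) { return false; }
            expected_total = Some(part.total_msat);
            claimable_msat += part.value_msat;
        }
        // ...and the parts we hold must cover it exactly.
        expected_total.map_or(false, |total| claimable_msat == total)
    }

    fn main() {
        let parts = [Part { value_msat: 40_000, total_msat: 100_000 },
                     Part { value_msat: 60_000, total_msat: 100_000 }];
        assert!(mpp_parts_complete(&parts));
    }
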
  
-       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
+       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
+               mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
+               prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+       -> Result<(), (PublicKey, MsgHandleErrInternal)> {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
  
                let chan_id = prev_hop.outpoint.to_channel_id();
-               let channel_state = &mut **channel_state_lock;
+               let channel_state = &mut *channel_state_lock;
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+                       let counterparty_node_id = chan.get().get_counterparty_node_id();
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok(msgs_monitor_option) => {
                                        if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
                                                                log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
                                                                        "Failed to update channel monitor with preimage {:?}: {:?}",
                                                                        payment_preimage, e);
-                                                               return ClaimFundsFromHop::MonitorUpdateFail(
-                                                                       chan.get().get_counterparty_node_id(),
-                                                                       handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-                                                                       Some(htlc_value_msat)
-                                                               );
+                                                               let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
+                                                               mem::drop(channel_state_lock);
+                                                               self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+                                                               return Err((counterparty_node_id, err));
                                                        }
                                                }
                                                if let Some((msg, commitment_signed)) = msgs {
                                                                }
                                                        });
                                                }
-                                               return ClaimFundsFromHop::Success(htlc_value_msat);
+                                               mem::drop(channel_state_lock);
+                                               self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+                                               Ok(())
                                        } else {
-                                               return ClaimFundsFromHop::DuplicateClaim;
+                                               Ok(())
                                        }
                                },
                                Err((e, monitor_update)) => {
                                        match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                ChannelMonitorUpdateStatus::Completed => {},
                                                e => {
+                                                       // TODO: This needs to be handled somehow - if we receive a monitor update
+                                                       // with a preimage we *must* somehow manage to propagate it to the upstream
+                                                       // channel, or we must have an ability to receive the same update and try
+                                                       // again on restart.
                                                        log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
                                                                "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
                                                                payment_preimage, e);
                                                },
                                        }
-                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
                                        let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
                                        if drop {
                                                chan.remove_entry();
                                        }
-                                       return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
+                                       mem::drop(channel_state_lock);
+                                       self.handle_monitor_update_completion_actions(completion_action(None));
+                                       Err((counterparty_node_id, res))
                                },
                        }
-               } else { return ClaimFundsFromHop::PrevHopForceClosed }
+               } else {
+                       let preimage_update = ChannelMonitorUpdate {
+                               update_id: CLOSED_CHANNEL_UPDATE_ID,
+                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                                       payment_preimage,
+                               }],
+                       };
+                       // We update the ChannelMonitor on the backward link, after
+                       // receiving an `update_fulfill_htlc` from the forward link.
+                       let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update);
+                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                               // TODO: This needs to be handled somehow - if we receive a monitor update
+                               // with a preimage we *must* somehow manage to propagate it to the upstream
+                               // channel, or we must have an ability to receive the same event and try
+                               // again on restart.
+                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                                       payment_preimage, update_res);
+                       }
+                       mem::drop(channel_state_lock);
+                       // Note that we do process the completion action here. This could well be a
+                       // duplicate claim, but we have no way of knowing without interrogating the
+                       // `ChannelMonitor` we've provided the above update to. Instead, we rely on the
+                       // fact that `Event`s are allowed to be duplicative (as is specifically noted for
+                       // `PaymentForwarded`).
+                       self.handle_monitor_update_completion_actions(completion_action(None));
+                       Ok(())
+               }
        }
  
        fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
                }
        }
  
-       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+       fn claim_funds_internal(&self, channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                mem::drop(channel_state_lock);
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_outpoint = hop_data.outpoint;
-                               let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
-                               let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
-                               let htlc_claim_value_msat = match res {
-                                       ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
-                                       ClaimFundsFromHop::Success(amt) => Some(amt),
-                                       _ => None,
-                               };
-                               if let ClaimFundsFromHop::PrevHopForceClosed = res {
-                                       let preimage_update = ChannelMonitorUpdate {
-                                               update_id: CLOSED_CHANNEL_UPDATE_ID,
-                                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                                                       payment_preimage: payment_preimage.clone(),
-                                               }],
-                                       };
-                                       // We update the ChannelMonitor on the backward link, after
-                                       // receiving an offchain preimage event from the forward link (the
-                                       // event being update_fulfill_htlc).
-                                       let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
-                                       if update_res != ChannelMonitorUpdateStatus::Completed {
-                                               // TODO: This needs to be handled somehow - if we receive a monitor update
-                                               // with a preimage we *must* somehow manage to propagate it to the upstream
-                                               // channel, or we must have an ability to receive the same event and try
-                                               // again on restart.
-                                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                       payment_preimage, update_res);
-                                       }
-                                       // Note that we do *not* set `claimed_htlc` to false here. In fact, this
-                                       // totally could be a duplicate claim, but we have no way of knowing
-                                       // without interrogating the `ChannelMonitor` we've provided the above
-                                       // update to. Instead, we simply document in `PaymentForwarded` that this
-                                       // can happen.
-                               }
-                               mem::drop(channel_state_lock);
-                               if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+                               let res = self.claim_funds_from_hop(channel_state_lock, hop_data, payment_preimage,
+                                       |htlc_claim_value_msat| {
+                                               if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                                       let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+                                                               Some(claimed_htlc_value - forwarded_htlc_value)
+                                                       } else { None };
+                                                       let prev_channel_id = Some(prev_outpoint.to_channel_id());
+                                                       let next_channel_id = Some(next_channel_id);
+                                                       Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
+                                                               fee_earned_msat,
+                                                               claim_from_onchain_tx: from_onchain,
+                                                               prev_channel_id,
+                                                               next_channel_id,
+                                                       }})
+                                               } else { None }
+                                       });
+                               if let Err((pk, err)) = res {
                                        let result: Result<(), _> = Err(err);
                                        let _ = handle_error!(self, result, pk);
                                }
-                               if claimed_htlc {
-                                       if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
-                                               let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
-                                                       Some(claimed_htlc_value - forwarded_htlc_value)
-                                               } else { None };
-                                               let mut pending_events = self.pending_events.lock().unwrap();
-                                               let prev_channel_id = Some(prev_outpoint.to_channel_id());
-                                               let next_channel_id = Some(next_channel_id);
-                                               pending_events.push(events::Event::PaymentForwarded {
-                                                       fee_earned_msat,
-                                                       claim_from_onchain_tx: from_onchain,
-                                                       prev_channel_id,
-                                                       next_channel_id,
-                                               });
-                                       }
-                               }
                        },
                }
        }
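
The closure above computes `fee_earned_msat` as the claimed (inbound) HTLC value minus the forwarded (outbound) value; when `claim_funds_from_hop` cannot learn the claimed value (it passes `None`, e.g. on the closed-channel path), the fee is reported as unknown. Numerically:

    fn main() {
        let claimed_htlc_value_msat: Option<u64> = Some(1_000_000); // inbound HTLC we claimed
        let forwarded_htlc_value_msat: u64 = 999_000;               // outbound HTLC we forwarded
        let fee_earned_msat = claimed_htlc_value_msat
            .map(|claimed| claimed - forwarded_htlc_value_msat);
        assert_eq!(fee_earned_msat, Some(1_000)); // 1_000 msat earned routing this HTLC
    }
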
                self.our_network_pubkey.clone()
        }
  
+       fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
+               for action in actions.into_iter() {
+                       match action {
+                               MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
+                                       let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
+                                       if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
+                                               self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+                                                       payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
+                                               });
+                                       }
+                               },
+                               MonitorUpdateCompletionAction::EmitEvent { event } => {
+                                       self.pending_events.lock().unwrap().push(event);
+                               },
+                       }
+               }
+       }
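
`MonitorUpdateCompletionAction` itself is defined outside the hunks shown here; judging from the two match arms above it plausibly has (at least) the following shape, sketched rather than quoted:

    enum MonitorUpdateCompletionAction {
        // Surface the `PaymentClaimed` event stashed in `pending_claiming_payments`
        // once the monitor update securing the preimage completes.
        PaymentClaimed { payment_hash: PaymentHash },
        // Push an arbitrary deferred event, e.g. `PaymentForwarded`.
        EmitEvent { event: events::Event },
    }
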
        /// Handles a channel reentering a functional state, either due to reconnect or a monitor
        /// update completion.
        fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
                                                        let reason = if (error_code & 0x1000) != 0 {
                                                                let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
 -                                                              onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data)
 +                                                              HTLCFailReason::reason(real_code, error_data)
                                                        } else {
 -                                                              onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
 -                                                      };
 +                                                              HTLCFailReason::from_failure_code(error_code)
 +                                                      }.get_encrypted_failure_packet(incoming_shared_secret, &None);
                                                        let msg = msgs::UpdateFailHTLC {
                                                                channel_id: msg.channel_id,
                                                                htlc_id: msg.htlc_id,
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
 -                              try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), chan);
 +                              try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                                        let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
                                        try_chan_entry!(self, Err(chan_err), chan);
                                }
 -                              try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::from_failure_code(msg.failure_code)), chan);
 +                              try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
                                Ok(())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
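
The `update_fail_malformed_htlc` change above matters for spec compliance: BOLT 4 BADONION failures (invalid_onion_version/hmac/key) carry the SHA256 of the offending onion as their data, so relaying the failure with an empty payload (the old `from_failure_code`) dropped required information. For example:

    // Hypothetical values, for illustration only.
    let failure_code: u16 = 0x8000 | 0x4000 | 5; // BADONION|PERM|5: invalid_onion_hmac
    let sha256_of_onion = [0u8; 32];             // hash reported by the downstream peer
    let reason = HTLCFailReason::reason(failure_code, sha256_of_onion.to_vec());
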
        /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
        /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
        /// update was applied.
 -      ///
 -      /// This should only apply to HTLCs which were added to the holding cell because we were
 -      /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
 -      /// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
 -      /// code to inform them of a channel monitor update.
        fn check_free_holding_cells(&self) -> bool {
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
@@@ -6100,7 -6343,7 +6163,7 @@@ wher
                }
  
                if let Some(height) = height_opt {
-                       self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+                       self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                htlcs.retain(|htlc| {
                                        // If height is approaching the number of blocks we think it takes us to get
                                        // our commitment transaction confirmed before the HTLC expires, plus the
@@@ -6857,6 -7100,16 +6920,6 @@@ impl Writeable for HTLCSource 
        }
  }
  
 -impl_writeable_tlv_based_enum!(HTLCFailReason,
 -      (0, LightningError) => {
 -              (0, err, required),
 -      },
 -      (1, Reason) => {
 -              (0, failure_code, required),
 -              (2, data, vec_type),
 -      },
 -;);
 -
  impl_writeable_tlv_based!(PendingAddHTLCInfo, {
        (0, forward_info, required),
        (1, prev_user_channel_id, (default_value, 0)),
@@@ -6955,12 -7208,12 +7018,12 @@@ impl<M: Deref, T: Deref, K: Deref, F: D
                }
  
                let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
-               let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+               let claimable_payments = self.claimable_payments.lock().unwrap();
                let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
  
                let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
-               (claimable_htlcs.len() as u64).write(writer)?;
-               for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
+               (claimable_payments.claimable_htlcs.len() as u64).write(writer)?;
+               for (payment_hash, (purpose, previous_hops)) in claimable_payments.claimable_htlcs.iter() {
                        payment_hash.write(writer)?;
                        (previous_hops.len() as u64).write(writer)?;
                        for htlc in previous_hops.iter() {
                if our_pending_intercepts.len() != 0 {
                        pending_intercepted_htlcs = Some(our_pending_intercepts);
                }
+               let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
+               if pending_claiming_payments.as_ref().unwrap().is_empty() {
+                       // LDK versions prior to 0.0.113 do not know how to read the `pending_claiming_payments`
+                       // map. Thus, if there are no entries we skip writing a TLV for it.
+                       pending_claiming_payments = None;
+               } else {
+                       debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR");
+               }
                write_tlv_fields!(writer, {
                        (1, pending_outbound_payments_no_retry, required),
                        (2, pending_intercepted_htlcs, option),
                        (3, pending_outbound_payments, required),
+                       (4, pending_claiming_payments, option),
                        (5, self.our_network_pubkey, required),
                        (7, self.fake_scid_rand_bytes, required),
                        (9, htlc_purposes, vec_type),
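
The skip-when-empty dance above exists because type 4 is an even TLV. Under the lightning TLV convention ("it's OK to be odd"), readers may skip unknown odd records but must reject a stream containing an unknown even one, so writing an empty map under type 4 would make pre-0.0.113 readers fail to deserialize the `ChannelManager`. The rule in one line:

    // Unknown odd TLV types may be ignored; unknown even types must abort the read.
    fn reader_may_ignore_unknown(tlv_type: u64) -> bool { tlv_type % 2 == 1 }
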
@@@ -7375,10 -7639,12 +7449,12 @@@ impl<'a, M: Deref, T: Deref, K: Deref, 
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
                let mut probing_cookie_secret: Option<[u8; 32]> = None;
                let mut claimable_htlc_purposes = None;
+               let mut pending_claiming_payments = Some(HashMap::new());
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (2, pending_intercepted_htlcs, option),
                        (3, pending_outbound_payments, option),
+                       (4, pending_claiming_payments, option),
                        (5, received_network_pubkey, option),
                        (7, fake_scid_rand_bytes, option),
                        (9, claimable_htlc_purposes, vec_type),
                        pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
  
                        forward_htlcs: Mutex::new(forward_htlcs),
-                       claimable_htlcs: Mutex::new(claimable_htlcs),
+                       claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs, pending_claiming_payments: pending_claiming_payments.unwrap() }),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        id_to_peer: Mutex::new(id_to_peer),
                        short_to_chan_info: FairRwLock::new(short_to_chan_info),