Don't hold `per_peer_state` lock during chain monitor update
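
This moves the `claimable_htlcs` map out of `ChannelHolder` (and thus out from
under the `channel_state` mutex) into its own `Mutex` on `ChannelManager`,
updates the documented lock order to match, scopes the `per_peer_state` read
lock taken while building a shutdown message so that it is no longer held when
the chain monitor is subsequently updated, and replaces several
`unreachable!()`s with graceful failures now that the `short_to_chan_info` and
`by_id` maps make no consistency guarantees relative to each other.

A minimal sketch of the resulting locking pattern, using hypothetical,
simplified types and names (`Manager`, `PeerState`, `chain_monitor_update`,
`shutdown_channel` below are illustrative stand-ins, not real LDK items):

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    struct PeerState { latest_features: u32 } // hypothetical stand-in
    struct MonitorUpdate;                     // hypothetical stand-in

    struct Manager {
        // `by_id` stays under `channel_state`; `claimable_htlcs` gets its own lock.
        channel_state: Mutex<HashMap<[u8; 32], String>>,
        claimable_htlcs: Mutex<HashMap<[u8; 32], Vec<u64>>>,
        per_peer_state: RwLock<HashMap<[u8; 33], Mutex<PeerState>>>,
    }

    impl Manager {
        fn chain_monitor_update(&self, _update: MonitorUpdate) {
            // Stand-in for the chain monitor update: callers must not be
            // holding `per_peer_state` (or other manager locks) when calling in.
        }

        fn shutdown_channel(&self, counterparty: [u8; 33], update: MonitorUpdate) {
            // Scope the `per_peer_state` read lock so its guard is dropped at
            // the end of the inner block, before the monitor update runs.
            let _features = {
                let per_peer_state = self.per_peer_state.read().unwrap();
                per_peer_state.get(&counterparty)
                    .map(|peer| peer.lock().unwrap().latest_features)
            }; // read guard released here
            self.chain_monitor_update(update);
        }

        fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> Option<Vec<u64>> {
            // `claimable_htlcs` is its own mutex now, so this no longer needs
            // to take the `channel_state` lock at all.
            self.claimable_htlcs.lock().unwrap().remove(payment_hash)
        }
    }

    fn main() {
        let mgr = Manager {
            channel_state: Mutex::new(HashMap::new()),
            claimable_htlcs: Mutex::new(HashMap::new()),
            per_peer_state: RwLock::new(HashMap::new()),
        };
        mgr.shutdown_channel([2u8; 33], MonitorUpdate);
        assert!(mgr.fail_htlc_backwards(&[0u8; 32]).is_none());
    }

The only property the scoping block buys is that the `per_peer_state` read
guard cannot outlive the inner block, so it can never be held across the
monitor-update call, which is what the commit title refers to.
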
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 98ac04406fee629ddb575325daab3295e8a7edca..e475cf13854ad7eca231811847c3fbb7dab40511 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -395,13 +395,6 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
        pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
-       /// failed/claimed by the user.
-       ///
-       /// Note that while this is held in the same mutex as the channels themselves, no consistency
-       /// guarantees are made about the channels given here actually existing anymore by the time you
-       /// go to read them!
-       claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@ -670,19 +663,21 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 //  |
 //  |__`forward_htlcs`
 //  |
-//  |__`channel_state`
-//  |   |
-//  |   |__`id_to_peer`
+//  |__`pending_inbound_payments`
 //  |   |
-//  |   |__`short_to_chan_info`
+//  |   |__`claimable_htlcs`
 //  |   |
-//  |   |__`per_peer_state`
+//  |   |__`pending_outbound_payments`
 //  |       |
-//  |       |__`outbound_scid_aliases`
-//  |       |
-//  |       |__`pending_inbound_payments`
+//  |       |__`channel_state`
+//  |           |
+//  |           |__`id_to_peer`
 //  |           |
-//  |           |__`pending_outbound_payments`
+//  |           |__`short_to_chan_info`
+//  |           |
+//  |           |__`per_peer_state`
+//  |               |
+//  |               |__`outbound_scid_aliases`
 //  |               |
 //  |               |__`best_block`
 //  |               |
@@ -753,6 +748,15 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        #[cfg(not(test))]
        forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
 
+       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+       /// failed/claimed by the user.
+       ///
+       /// Note that no consistency guarantees are made about the channels given here actually
+       /// existing anymore by the time you go to read them!
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
+       claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+
        /// The set of outbound SCID aliases across all our channels, including unconfirmed channels
        /// and some closed channels which reached a usable state prior to being closed. This is used
        /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
@@ -1657,13 +1661,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
-                               claimable_htlcs: HashMap::new(),
                                pending_msg_events: Vec::new(),
                        }),
                        outbound_scid_aliases: Mutex::new(HashSet::new()),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
                        pending_outbound_payments: Mutex::new(HashMap::new()),
                        forward_htlcs: Mutex::new(HashMap::new()),
+                       claimable_htlcs: Mutex::new(HashMap::new()),
                        id_to_peer: Mutex::new(HashMap::new()),
                        short_to_chan_info: FairRwLock::new(HashMap::new()),
 
@@ -1902,14 +1906,16 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
                                                return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                        }
-                                       let per_peer_state = self.per_peer_state.read().unwrap();
-                                       let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
-                                               Some(peer_state) => {
-                                                       let peer_state = peer_state.lock().unwrap();
-                                                       let their_features = &peer_state.latest_features;
-                                                       chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
-                                               },
-                                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                                       let (shutdown_msg, monitor_update, htlcs) = {
+                                               let per_peer_state = self.per_peer_state.read().unwrap();
+                                               match per_peer_state.get(&counterparty_node_id) {
+                                                       Some(peer_state) => {
+                                                               let peer_state = peer_state.lock().unwrap();
+                                                               let their_features = &peer_state.latest_features;
+                                                               chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+                                                       },
+                                                       None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                                               }
                                        };
                                        failed_htlcs = htlcs;
 
@@ -2317,7 +2323,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                Some((_cp_id, chan_id)) => Some(chan_id.clone()),
                                        };
                                        let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt {
-                                               let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap();
+                                               let chan = match channel_state.by_id.get_mut(&forwarding_id) {
+                                                       None => {
+                                                               // The channel was removed. The short_to_chan_info and by_id maps make no
+                                                               // consistency guarantees with respect to each other.
+                                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                                       },
+                                                       Some(chan) => chan
+                                               };
                                                if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
                                                        // Note that the behavior here should be identical to the above block - we
                                                        // should NOT reveal the existence or non-existence of a private channel if
@@ -2544,7 +2557,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        },
                                        None => { },
                                }
-                       } else { unreachable!(); }
+                       } else {
+                               // The channel was likely removed after we fetched the id from the
+                               // `short_to_chan_info` map, but before we successfully locked the `by_id` map.
+                               // This can occur as no consistency guarantees exist between the two maps.
+                               return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
+                       }
                        return Ok(());
                };
 
@@ -3130,12 +3148,9 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
 
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
-                               let mut channel_state_lock = self.channel_state.lock().unwrap();
-                               let channel_state = &mut *channel_state_lock;
                                if short_chan_id != 0 {
-                                       let forward_chan_id = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
-                                               Some((_cp_id, chan_id)) => chan_id.clone(),
-                                               None => {
+                                       macro_rules! forwarding_channel_not_found {
+                                               () => {
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
@@ -3222,136 +3237,148 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                        }
                                                                }
                                                        }
+                                               }
+                                       }
+                                       let forward_chan_id = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
+                                               Some((_cp_id, chan_id)) => chan_id.clone(),
+                                               None => {
+                                                       forwarding_channel_not_found!();
                                                        continue;
                                                }
                                        };
-                                       if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
-                                               let mut add_htlc_msgs = Vec::new();
-                                               let mut fail_htlc_msgs = Vec::new();
-                                               for forward_info in pending_forwards.drain(..) {
-                                                       match forward_info {
-                                                               HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
-                                                                               routing: PendingHTLCRouting::Forward {
-                                                                                       onion_packet, ..
-                                                                               }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
-                                                                               prev_funding_outpoint } => {
-                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
-                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                               short_channel_id: prev_short_channel_id,
-                                                                               outpoint: prev_funding_outpoint,
-                                                                               htlc_id: prev_htlc_id,
-                                                                               incoming_packet_shared_secret: incoming_shared_secret,
-                                                                               // Phantom payments are only PendingHTLCRouting::Receive.
-                                                                               phantom_shared_secret: None,
-                                                                       });
-                                                                       match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
-                                                                               Err(e) => {
-                                                                                       if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
-                                                                                       } else {
-                                                                                               panic!("Stated return value requirements in send_htlc() were not met");
-                                                                                       }
-                                                                                       let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
-                                                                                       failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::Reason { failure_code, data },
-                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
-                                                                                       ));
-                                                                                       continue;
-                                                                               },
-                                                                               Ok(update_add) => {
-                                                                                       match update_add {
-                                                                                               Some(msg) => { add_htlc_msgs.push(msg); },
-                                                                                               None => {
-                                                                                                       // Nothing to do here...we're waiting on a remote
-                                                                                                       // revoke_and_ack before we can add anymore HTLCs. The Channel
-                                                                                                       // will automatically handle building the update_add_htlc and
-                                                                                                       // commitment_signed messages when we can.
-                                                                                                       // TODO: Do some kind of timer to set the channel as !is_live()
-                                                                                                       // as we don't really want others relying on us relaying through
-                                                                                                       // this channel currently :/.
+                                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                                       let channel_state = &mut *channel_state_lock;
+                                       match channel_state.by_id.entry(forward_chan_id) {
+                                               hash_map::Entry::Vacant(_) => {
+                                                       forwarding_channel_not_found!();
+                                                       continue;
+                                               },
+                                               hash_map::Entry::Occupied(mut chan) => {
+                                                       let mut add_htlc_msgs = Vec::new();
+                                                       let mut fail_htlc_msgs = Vec::new();
+                                                       for forward_info in pending_forwards.drain(..) {
+                                                               match forward_info {
+                                                                       HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
+                                                                                       routing: PendingHTLCRouting::Forward {
+                                                                                               onion_packet, ..
+                                                                                       }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
+                                                                                       prev_funding_outpoint } => {
+                                                                               log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
+                                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                                       short_channel_id: prev_short_channel_id,
+                                                                                       outpoint: prev_funding_outpoint,
+                                                                                       htlc_id: prev_htlc_id,
+                                                                                       incoming_packet_shared_secret: incoming_shared_secret,
+                                                                                       // Phantom payments are only PendingHTLCRouting::Receive.
+                                                                                       phantom_shared_secret: None,
+                                                                               });
+                                                                               match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
+                                                                                       Err(e) => {
+                                                                                               if let ChannelError::Ignore(msg) = e {
+                                                                                                       log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
+                                                                                               } else {
+                                                                                                       panic!("Stated return value requirements in send_htlc() were not met");
+                                                                                               }
+                                                                                               let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
+                                                                                               failed_forwards.push((htlc_source, payment_hash,
+                                                                                                       HTLCFailReason::Reason { failure_code, data },
+                                                                                                       HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+                                                                                               ));
+                                                                                               continue;
+                                                                                       },
+                                                                                       Ok(update_add) => {
+                                                                                               match update_add {
+                                                                                                       Some(msg) => { add_htlc_msgs.push(msg); },
+                                                                                                       None => {
+                                                                                                               // Nothing to do here...we're waiting on a remote
+                                                                                                               // revoke_and_ack before we can add anymore HTLCs. The Channel
+                                                                                                               // will automatically handle building the update_add_htlc and
+                                                                                                               // commitment_signed messages when we can.
+                                                                                                               // TODO: Do some kind of timer to set the channel as !is_live()
+                                                                                                               // as we don't really want others relying on us relaying through
+                                                                                                               // this channel currently :/.
+                                                                                                       }
                                                                                                }
                                                                                        }
                                                                                }
-                                                                       }
-                                                               },
-                                                               HTLCForwardInfo::AddHTLC { .. } => {
-                                                                       panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
-                                                               },
-                                                               HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
-                                                                               Err(e) => {
-                                                                                       if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                                       } else {
-                                                                                               panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+                                                                       },
+                                                                       HTLCForwardInfo::AddHTLC { .. } => {
+                                                                               panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+                                                                       },
+                                                                       HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+                                                                               log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                               match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
+                                                                                       Err(e) => {
+                                                                                               if let ChannelError::Ignore(msg) = e {
+                                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                                               } else {
+                                                                                                       panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+                                                                                               }
+                                                                                               // fail-backs are best-effort, we probably already have one
+                                                                                               // pending, and if not that's OK, if not, the channel is on
+                                                                                               // the chain and sending the HTLC-Timeout is their problem.
+                                                                                               continue;
+                                                                                       },
+                                                                                       Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+                                                                                       Ok(None) => {
+                                                                                               // Nothing to do here...we're waiting on a remote
+                                                                                               // revoke_and_ack before we can update the commitment
+                                                                                               // transaction. The Channel will automatically handle
+                                                                                               // building the update_fail_htlc and commitment_signed
+                                                                                               // messages when we can.
+                                                                                               // We don't need any kind of timer here as they should fail
+                                                                                               // the channel onto the chain if they can't get our
+                                                                                               // update_fail_htlc in time, it's not our problem.
                                                                                        }
-                                                                                       // fail-backs are best-effort, we probably already have one
-                                                                                       // pending, and if not that's OK, if not, the channel is on
-                                                                                       // the chain and sending the HTLC-Timeout is their problem.
-                                                                                       continue;
-                                                                               },
-                                                                               Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
-                                                                               Ok(None) => {
-                                                                                       // Nothing to do here...we're waiting on a remote
-                                                                                       // revoke_and_ack before we can update the commitment
-                                                                                       // transaction. The Channel will automatically handle
-                                                                                       // building the update_fail_htlc and commitment_signed
-                                                                                       // messages when we can.
-                                                                                       // We don't need any kind of timer here as they should fail
-                                                                                       // the channel onto the chain if they can't get our
-                                                                                       // update_fail_htlc in time, it's not our problem.
                                                                                }
-                                                                       }
-                                                               },
+                                                                       },
+                                                               }
                                                        }
-                                               }
 
-                                               if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-                                                       let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
-                                                               Ok(res) => res,
-                                                               Err(e) => {
-                                                                       // We surely failed send_commitment due to bad keys, in that case
-                                                                       // close channel and then send error message to peer.
-                                                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
-                                                                       let err: Result<(), _>  = match e {
-                                                                               ChannelError::Ignore(_) | ChannelError::Warn(_) => {
-                                                                                       panic!("Stated return value requirements in send_commitment() were not met");
-                                                                               }
-                                                                               ChannelError::Close(msg) => {
-                                                                                       log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
-                                                                                       let mut channel = remove_channel!(self, chan);
-                                                                                       // ChannelClosed event is generated by handle_error for us.
-                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
-                                                                               },
-                                                                       };
-                                                                       handle_errors.push((counterparty_node_id, err));
-                                                                       continue;
-                                                               }
-                                                       };
-                                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                               ChannelMonitorUpdateStatus::Completed => {},
-                                                               e => {
-                                                                       handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
-                                                                       continue;
+                                                       if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+                                                               let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
+                                                                       Ok(res) => res,
+                                                                       Err(e) => {
+                                                                               // We surely failed send_commitment due to bad keys, in that case
+                                                                               // close channel and then send error message to peer.
+                                                                               let counterparty_node_id = chan.get().get_counterparty_node_id();
+                                                                               let err: Result<(), _>  = match e {
+                                                                                       ChannelError::Ignore(_) | ChannelError::Warn(_) => {
+                                                                                               panic!("Stated return value requirements in send_commitment() were not met");
+                                                                                       }
+                                                                                       ChannelError::Close(msg) => {
+                                                                                               log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
+                                                                                               let mut channel = remove_channel!(self, chan);
+                                                                                               // ChannelClosed event is generated by handle_error for us.
+                                                                                               Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
+                                                                                       },
+                                                                               };
+                                                                               handle_errors.push((counterparty_node_id, err));
+                                                                               continue;
+                                                                       }
+                                                               };
+                                                               match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                                       ChannelMonitorUpdateStatus::Completed => {},
+                                                                       e => {
+                                                                               handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+                                                                               continue;
+                                                                       }
                                                                }
+                                                               log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
+                                                                       add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
+                                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                                       updates: msgs::CommitmentUpdate {
+                                                                               update_add_htlcs: add_htlc_msgs,
+                                                                               update_fulfill_htlcs: Vec::new(),
+                                                                               update_fail_htlcs: fail_htlc_msgs,
+                                                                               update_fail_malformed_htlcs: Vec::new(),
+                                                                               update_fee: None,
+                                                                               commitment_signed: commitment_msg,
+                                                                       },
+                                                               });
                                                        }
-                                                       log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
-                                                               add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                               node_id: chan.get().get_counterparty_node_id(),
-                                                               updates: msgs::CommitmentUpdate {
-                                                                       update_add_htlcs: add_htlc_msgs,
-                                                                       update_fulfill_htlcs: Vec::new(),
-                                                                       update_fail_htlcs: fail_htlc_msgs,
-                                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                                       update_fee: None,
-                                                                       commitment_signed: commitment_msg,
-                                                               },
-                                                       });
                                                }
-                                       } else {
-                                               unreachable!();
                                        }
                                } else {
                                        for forward_info in pending_forwards.drain(..) {
@@ -3413,7 +3440,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                                                payment_secret: $payment_data.payment_secret,
                                                                                        }
                                                                                };
-                                                                               let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+                                                                               let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+                                                                               let (_, htlcs) = claimable_htlcs.entry(payment_hash)
                                                                                        .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
@@ -3481,7 +3509,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                                                check_total_value!(payment_data, payment_preimage);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
-                                                                                               match channel_state.claimable_htlcs.entry(payment_hash) {
+                                                                                               match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
                                                                                                                let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
                                                                                                                e.insert((purpose.clone(), vec![claimable_htlc]));
@@ -3770,29 +3798,29 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                                        true
                                });
+                       }
 
-                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
-                                       if htlcs.is_empty() {
-                                               // This should be unreachable
-                                               debug_assert!(false);
+                       self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+                               if htlcs.is_empty() {
+                                       // This should be unreachable
+                                       debug_assert!(false);
+                                       return false;
+                               }
+                               if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+                                       // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+                                       // In this case we're not going to handle any timeouts of the parts here.
+                                       if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+                                               return true;
+                                       } else if htlcs.into_iter().any(|htlc| {
+                                               htlc.timer_ticks += 1;
+                                               return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+                                       }) {
+                                               timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
                                                return false;
                                        }
-                                       if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
-                                               // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
-                                               // In this case we're not going to handle any timeouts of the parts here.
-                                               if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
-                                                       return true;
-                                               } else if htlcs.into_iter().any(|htlc| {
-                                                       htlc.timer_ticks += 1;
-                                                       return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
-                                               }) {
-                                                       timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
-                                                       return false;
-                                               }
-                                       }
-                                       true
-                               });
-                       }
+                               }
+                               true
+                       });
 
                        for htlc_source in timed_out_mpp_htlcs.drain(..) {
                                let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
@@ -3825,10 +3853,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let removed_source = {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       channel_state.claimable_htlcs.remove(payment_hash)
-               };
+               let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
                if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
@@ -4130,7 +4155,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
+               let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
                if let Some((payment_purpose, mut sources)) = removed_source {
                        assert!(!sources.is_empty());
 
@@ -4310,7 +4335,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
                                },
                        }
-               } else { unreachable!(); }
+               } else { return ClaimFundsFromHop::PrevHopForceClosed }
        }
 
        fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
@@ -5226,7 +5251,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
                                }
                        },
-                       hash_map::Entry::Vacant(_) => unreachable!()
+                       hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
                }
                Ok(NotifyOption::DoPersist)
        }
@@ -5998,28 +6023,28 @@ where
                                }
                                true
                        });
+               }
 
-                       if let Some(height) = height_opt {
-                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
-                                       htlcs.retain(|htlc| {
-                                               // If height is approaching the number of blocks we think it takes us to get
-                                               // our commitment transaction confirmed before the HTLC expires, plus the
-                                               // number of blocks we generally consider it to take to do a commitment update,
-                                               // just give up on it and fail the HTLC.
-                                               if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
-                                                       let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-                                                       htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-
-                                                       timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
-                                                               failure_code: 0x4000 | 15,
-                                                               data: htlc_msat_height_data
-                                                       }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
-                                                       false
-                                               } else { true }
-                                       });
-                                       !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+               if let Some(height) = height_opt {
+                       self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+                               htlcs.retain(|htlc| {
+                                       // If height is approaching the number of blocks we think it takes us to get
+                                       // our commitment transaction confirmed before the HTLC expires, plus the
+                                       // number of blocks we generally consider it to take to do a commitment update,
+                                       // just give up on it and fail the HTLC.
+                                       if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+                                               let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+                                               htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
+                                               timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+                                                       failure_code: 0x4000 | 15,
+                                                       data: htlc_msat_height_data
+                                               }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+                                               false
+                                       } else { true }
                                });
-                       }
+                               !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+                       });
                }
 
                self.handle_init_event_channel_failures(failed_channels);
@@ -6754,10 +6779,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
                        }
                }
 
-               let channel_state = self.channel_state.lock().unwrap();
+               let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+               let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+               let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+
                let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
-               (channel_state.claimable_htlcs.len() as u64).write(writer)?;
-               for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
+               (claimable_htlcs.len() as u64).write(writer)?;
+               for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
                        payment_hash.write(writer)?;
                        (previous_hops.len() as u64).write(writer)?;
                        for htlc in previous_hops.iter() {
@@ -6774,8 +6802,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
                        peer_state.latest_features.write(writer)?;
                }
 
-               let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
-               let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
                let events = self.pending_events.lock().unwrap();
                (events.len() as u64).write(writer)?;
                for event in events.iter() {
@@ -7368,7 +7394,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
-                               claimable_htlcs,
                                pending_msg_events: Vec::new(),
                        }),
                        inbound_payment_key: expanded_inbound_key,
@@ -7376,6 +7401,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
 
                        forward_htlcs: Mutex::new(forward_htlcs),
+                       claimable_htlcs: Mutex::new(claimable_htlcs),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        id_to_peer: Mutex::new(id_to_peer),
                        short_to_chan_info: FairRwLock::new(short_to_chan_info),