Expect `pending_msg_events` to be in random peer order in tests
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7b2865bb1475c6e80f96b4859a9eafa10116c839..108bc7063d3e25f24fca2b736bf2311d41d6988a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -436,9 +436,6 @@ struct ClaimablePayments {
 
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder {
-       /// Messages to send to peers - pushed to in the same lock that they are generated in (except
-       /// for broadcast messages, where ordering isn't as strict).
-       pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
/// Events which we process internally but cannot be processed immediately at the generation site
@@ -470,6 +467,9 @@ pub(super) struct PeerState<Signer: Sign> {
        pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
        /// The latest `InitFeatures` we heard from the peer.
        latest_features: InitFeatures,
+       /// Messages to send to the peer - pushed to in the same lock that they are generated in (except
+       /// for broadcast messages, where ordering isn't as strict).
+       pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
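
The hunks above move the per-node message queue out of the global `ChannelHolder` and into each peer's `PeerState`. Below is a minimal, self-contained sketch of the resulting shape, using simplified stand-in types (`NodeId` as a raw byte array, a trimmed-down `MessageSendEvent` and `PeerState`) rather than the real rust-lightning definitions:

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Simplified stand-ins for the real rust-lightning types.
type NodeId = [u8; 33];

enum MessageSendEvent {
    SendOpenChannel { node_id: NodeId },
}

struct PeerState {
    // `channel_by_id`, `latest_features`, etc. live here in the real struct.
    pending_msg_events: Vec<MessageSendEvent>,
}

struct Manager {
    // The message queue is now per peer instead of one global Vec.
    per_peer_state: RwLock<HashMap<NodeId, Mutex<PeerState>>>,
}

impl Manager {
    // Append an event to one peer's queue, if that peer is known.
    fn queue_event(&self, node_id: NodeId, event: MessageSendEvent) -> bool {
        let per_peer_state = self.per_peer_state.read().unwrap();
        match per_peer_state.get(&node_id) {
            Some(peer_state_mutex) => {
                peer_state_mutex.lock().unwrap().pending_msg_events.push(event);
                true
            }
            None => false,
        }
    }
}

fn main() {
    let node_id = [2u8; 33];
    let mut peers = HashMap::new();
    peers.insert(node_id, Mutex::new(PeerState { pending_msg_events: Vec::new() }));
    let mgr = Manager { per_peer_state: RwLock::new(peers) };
    assert!(mgr.queue_event(node_id, MessageSendEvent::SendOpenChannel { node_id }));
    assert!(!mgr.queue_event([3u8; 33], MessageSendEvent::SendOpenChannel { node_id }));
}
```

Because the outer map is a `HashMap`, nothing about this layout guarantees an ordering between different peers' queues, which is why tests are now expected to tolerate a random peer order.
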
@@ -699,6 +699,9 @@ where
        /// the corresponding channel for the event, as we only have access to the `channel_id` during
        /// the handling of the events.
        ///
+       /// Note that no consistency guarantees are made about the existence of a peer with the
+       /// `counterparty_node_id` in our other maps.
+       ///
        /// TODO:
        /// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
        /// to make `counterparty_node_id`'s a required field in `ChannelMonitor`s, which unfortunately
@@ -1162,6 +1165,7 @@ macro_rules! handle_error {
                                        // entering the macro.
                                        assert!($self.channel_state.try_lock().is_ok());
                                        assert!($self.pending_events.try_lock().is_ok());
+                                       assert!($self.per_peer_state.try_write().is_ok());
                                }
 
                                let mut msg_events = Vec::with_capacity(2);
@@ -1191,7 +1195,31 @@ macro_rules! handle_error {
                                }
 
                                if !msg_events.is_empty() {
-                                       $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events);
+                                       let per_peer_state = $self.per_peer_state.read().unwrap();
+                                       if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
+                                               let mut peer_state = peer_state_mutex.lock().unwrap();
+                                               peer_state.pending_msg_events.append(&mut msg_events);
+                                       }
+                                       #[cfg(debug_assertions)]
+                                       {
+                                               if let None = per_peer_state.get(&$counterparty_node_id) {
+                                                       // This shouldn't occur in tests unless an unknown counterparty_node_id
+                                                       // has been passed to our message handling functions.
+                                                       let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", $counterparty_node_id);
+                                                       match err.action {
+                                                               msgs::ErrorAction::SendErrorMessage {
+                                                                       msg: msgs::ErrorMessage { ref channel_id, ref data }
+                                                               }
+                                                               => {
+                                                                       assert_eq!(*data, expected_error_str);
+                                                                       if let Some((err_channel_id, _user_channel_id)) = chan_id {
+                                                                               assert_eq!(*channel_id, err_channel_id);
+                                                                       }
+                                                               }
+                                                               _ => panic!("Unexpected event"),
+                                                       }
+                                               }
+                                       }
                                }
 
                                // Return error in case higher-API need one
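
In the `handle_error!` hunk above, generated messages are appended to the addressed peer's queue, and a debug-only block cross-checks that a missing peer coincides with the "Can't find a peer matching the passed counterparty node_id" error. A hedged sketch of that control flow, with simplified types (`String` node ids and `String` events stand in for the real ones):

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState {
    pending_msg_events: Vec<String>,
}

// Append the queued error-handling events to the addressed peer's queue.
// If the peer is unknown, debug builds check that the error we produced is
// the expected "unknown counterparty" message, mirroring the
// cfg(debug_assertions) block in the hunk above.
fn route_error_events(
    per_peer_state: &RwLock<HashMap<String, Mutex<PeerState>>>,
    counterparty_node_id: &str,
    msg_events: &mut Vec<String>,
    err_data: &str,
) {
    if msg_events.is_empty() {
        return;
    }
    let per_peer_state = per_peer_state.read().unwrap();
    if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
        peer_state_mutex.lock().unwrap().pending_msg_events.append(msg_events);
    } else {
        debug_assert_eq!(
            err_data,
            format!(
                "Can't find a peer matching the passed counterparty node_id {}",
                counterparty_node_id
            )
        );
    }
}

fn main() {
    let peers: RwLock<HashMap<String, Mutex<PeerState>>> = RwLock::new(HashMap::new());
    let mut events = vec!["HandleError".to_string()];
    // Unknown peer: events are dropped, and the debug assertion verifies the error text.
    route_error_events(
        &peers,
        "03deadbeef",
        &mut events,
        "Can't find a peer matching the passed counterparty node_id 03deadbeef",
    );
}
```
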
@@ -1426,7 +1454,6 @@ where
                        best_block: RwLock::new(params.best_block),
 
                        channel_state: Mutex::new(ChannelHolder{
-                               pending_msg_events: Vec::new(),
                        }),
                        outbound_scid_aliases: Mutex::new(HashSet::new()),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
@@ -1515,41 +1542,38 @@ where
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
 
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
+               debug_assert!(&self.total_consistency_lock.try_write().is_err());
+
+               let mut channel_state = self.channel_state.lock().unwrap();
+               let per_peer_state = self.per_peer_state.read().unwrap();
+
+               let peer_state_mutex_opt = per_peer_state.get(&their_network_key);
+               if let None = peer_state_mutex_opt {
+                       return Err(APIError::APIMisuseError { err: format!("Not connected to node: {}", their_network_key) });
+               }
+
+               let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
                let channel = {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       match per_peer_state.get(&their_network_key) {
-                               Some(peer_state) => {
-                                       let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
-                                       let peer_state = peer_state.lock().unwrap();
-                                       let their_features = &peer_state.latest_features;
-                                       let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
-                                       match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key,
-                                               their_features, channel_value_satoshis, push_msat, user_channel_id, config,
-                                               self.best_block.read().unwrap().height(), outbound_scid_alias)
-                                       {
-                                               Ok(res) => res,
-                                               Err(e) => {
-                                                       self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
-                                                       return Err(e);
-                                               },
-                                       }
+                       let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+                       let their_features = &peer_state.latest_features;
+                       let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
+                       match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key,
+                               their_features, channel_value_satoshis, push_msat, user_channel_id, config,
+                               self.best_block.read().unwrap().height(), outbound_scid_alias)
+                       {
+                               Ok(res) => res,
+                               Err(e) => {
+                                       self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+                                       return Err(e);
                                },
-                               None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
                        }
                };
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
-               debug_assert!(&self.total_consistency_lock.try_write().is_err());
-
                let temporary_channel_id = channel.channel_id();
-               let mut channel_state = self.channel_state.lock().unwrap();
-               let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(&their_network_key){
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(temporary_channel_id) {
+               match peer_state.channel_by_id.entry(temporary_channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(fuzzing) {
                                        return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
@@ -1558,9 +1582,9 @@ where
                                }
                        },
                        hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
-                       }
-               } else { return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }) }
-               channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+               }
+
+               peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                        node_id: their_network_key,
                        msg: res,
                });
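
The rewritten `create_channel` takes the persistence guard and the coarse locks before looking the peer up, then holds the single peer-level mutex across channel insertion and the `SendOpenChannel` push. The sketch below illustrates that lock-ordering pattern with simplified types; the outermost `RwLock<()>` standing in for `total_consistency_lock` is an assumption about intent drawn from this hunk, not a documented invariant:

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState {
    channel_by_id: HashMap<[u8; 32], ()>,
    pending_msg_events: Vec<&'static str>,
}

struct Node {
    // Stand-in for the persistence/consistency lock taken by the guard.
    total_consistency_lock: RwLock<()>,
    per_peer_state: RwLock<HashMap<u8, Mutex<PeerState>>>,
}

impl Node {
    fn create_channel(&self, peer: u8, temporary_channel_id: [u8; 32]) -> Result<(), String> {
        // 1. Outermost: the consistency/persistence lock (read side).
        let _guard = self.total_consistency_lock.read().unwrap();
        // 2. The peer-map read lock; fail early if the peer is unknown.
        let per_peer_state = self.per_peer_state.read().unwrap();
        let peer_state_mutex = per_peer_state
            .get(&peer)
            .ok_or_else(|| format!("Not connected to node: {}", peer))?;
        // 3. Innermost: this peer's own mutex, held across both the channel
        //    insertion and the queued open_channel message.
        let mut peer_state = peer_state_mutex.lock().unwrap();
        peer_state.channel_by_id.insert(temporary_channel_id, ());
        peer_state.pending_msg_events.push("SendOpenChannel");
        Ok(())
    }
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(7u8, Mutex::new(PeerState {
        channel_by_id: HashMap::new(),
        pending_msg_events: Vec::new(),
    }));
    let node = Node {
        total_consistency_lock: RwLock::new(()),
        per_peer_state: RwLock::new(peers),
    };
    assert!(node.create_channel(7, [0u8; 32]).is_ok());
    assert!(node.create_channel(8, [1u8; 32]).is_err());
}
```
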
@@ -1678,48 +1702,47 @@ where
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(channel_id.clone()) {
-                                       hash_map::Entry::Occupied(mut chan_entry) => {
-                                               if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
-                                                       return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
-                                               }
-                                               let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
-                                               failed_htlcs = htlcs;
-
-                                               // Update the monitor with the shutdown script if necessary.
-                                               if let Some(monitor_update) = monitor_update {
-                                                       let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, chan_entry);
-                                                               break result;
-                                                       }
+
+                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) });
+                       }
+
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(channel_id.clone()) {
+                               hash_map::Entry::Occupied(mut chan_entry) => {
+                                       let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+                                       failed_htlcs = htlcs;
+
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update {
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, chan_entry);
+                                                       break result;
                                                }
+                                       }
 
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: *counterparty_node_id,
-                                                       msg: shutdown_msg
-                                               });
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                               node_id: *counterparty_node_id,
+                                               msg: shutdown_msg
+                                       });
 
-                                               if chan_entry.get().is_shutdown() {
-                                                       let channel = remove_channel!(self, chan_entry);
-                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
-                                                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: channel_update
-                                                               });
-                                                       }
-                                                       self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+                                       if chan_entry.get().is_shutdown() {
+                                               let channel = remove_channel!(self, chan_entry);
+                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                               msg: channel_update
+                                                       });
                                                }
-                                               break Ok(());
-                                       },
-                                       hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() })
-                               }
-                       } else {
-                               return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) });
+                                               self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+                                       }
+                                       break Ok(());
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
                        }
                };
 
@@ -1800,33 +1823,30 @@ where
        /// user closes, which will be re-exposed as the `ChannelClosed` reason.
        fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
        -> Result<PublicKey, APIError> {
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               let peer_state_mutex_opt = per_peer_state.get(peer_node_id);
                let mut chan = {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(peer_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
-                                       if chan.get().get_counterparty_node_id() != *peer_node_id {
-                                               return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
-                                       }
-                                       if let Some(peer_msg) = peer_msg {
-                                               self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
-                                       } else {
-                                               self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
-                                       }
-                                       remove_channel!(self, chan)
+                       if let None = peer_state_mutex_opt {
+                               return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) });
+                       }
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
+                               if let Some(peer_msg) = peer_msg {
+                                       self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
                                } else {
-                                       return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
+                                       self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
                                }
+                               remove_channel!(self, chan)
                        } else {
-                               return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", peer_node_id) });
+                               return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
                        }
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown(broadcast));
                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                       let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                msg: update
                        });
                }
@@ -1838,14 +1858,18 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
-                               self.channel_state.lock().unwrap().pending_msg_events.push(
-                                       events::MessageSendEvent::HandleError {
-                                               node_id: counterparty_node_id,
-                                               action: msgs::ErrorAction::SendErrorMessage {
-                                                       msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
-                                               },
-                                       }
-                               );
+                               let per_peer_state = self.per_peer_state.read().unwrap();
+                               if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                                       let mut peer_state = peer_state_mutex.lock().unwrap();
+                                       peer_state.pending_msg_events.push(
+                                               events::MessageSendEvent::HandleError {
+                                                       node_id: counterparty_node_id,
+                                                       action: msgs::ErrorAction::SendErrorMessage {
+                                                               msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+                                                       },
+                                               }
+                                       );
+                               }
                                Ok(())
                        },
                        Err(e) => Err(e)
@@ -2104,11 +2128,11 @@ where
                                        };
                                        let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
-                                               if let None = per_peer_state.get(&counterparty_node_id) {
+                                               let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+                                               if let None = peer_state_mutex_opt {
                                                        break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
                                                }
-                                               let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap();
-                                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                                let peer_state = &mut *peer_state_lock;
                                                let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
                                                        None => {
@@ -2299,69 +2323,71 @@ where
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
-                                       match {
-                                               if !chan.get().is_live() {
-                                                       return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
+                       let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               return Err(APIError::InvalidRoute{err: "No peer matching the path's first hop found!" });
+                       }
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
+                               match {
+                                       if !chan.get().is_live() {
+                                               return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
+                                       }
+                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+                                               htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+                                                       path: path.clone(),
+                                                       session_priv: session_priv.clone(),
+                                                       first_hop_htlc_msat: htlc_msat,
+                                                       payment_id,
+                                                       payment_secret: payment_secret.clone(),
+                                                       payment_params: payment_params.clone(),
+                                               }, onion_packet, &self.logger),
+                                               chan)
+                               } {
+                                       Some((update_add, commitment_signed, monitor_update)) => {
+                                               let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                                               let chan_id = chan.get().channel_id();
+                                               match (update_err,
+                                                       handle_monitor_update_res!(self, update_err, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false, true))
+                                               {
+                                                       (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+                                                       (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
+                                                       (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+                                                               // Note that MonitorUpdateInProgress here indicates (per function
+                                                               // docs) that we will resend the commitment update once monitor
+                                                               // updating completes. Therefore, we must return an error
+                                                               // indicating that it is unsafe to retry the payment wholesale,
+                                                               // which we do in the send_payment check for
+                                                               // MonitorUpdateInProgress, below.
+                                                               return Err(APIError::MonitorUpdateInProgress);
+                                                       },
+                                                       _ => unreachable!(),
                                                }
-                                               break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
-                                                       htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-                                                               path: path.clone(),
-                                                               session_priv: session_priv.clone(),
-                                                               first_hop_htlc_msat: htlc_msat,
-                                                               payment_id,
-                                                               payment_secret: payment_secret.clone(),
-                                                               payment_params: payment_params.clone(),
-                                                       }, onion_packet, &self.logger),
-                                                       chan)
-                                       } {
-                                               Some((update_add, commitment_signed, monitor_update)) => {
-                                                       let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
-                                                       let chan_id = chan.get().channel_id();
-                                                       match (update_err,
-                                                               handle_monitor_update_res!(self, update_err, chan,
-                                                                       RAACommitmentOrder::CommitmentFirst, false, true))
-                                                       {
-                                                               (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
-                                                               (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
-                                                               (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
-                                                                       // Note that MonitorUpdateInProgress here indicates (per function
-                                                                       // docs) that we will resend the commitment update once monitor
-                                                                       // updating completes. Therefore, we must return an error
-                                                                       // indicating that it is unsafe to retry the payment wholesale,
-                                                                       // which we do in the send_payment check for
-                                                                       // MonitorUpdateInProgress, below.
-                                                                       return Err(APIError::MonitorUpdateInProgress);
-                                                               },
-                                                               _ => unreachable!(),
-                                                       }
 
-                                                       log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                               node_id: path.first().unwrap().pubkey,
-                                                               updates: msgs::CommitmentUpdate {
-                                                                       update_add_htlcs: vec![update_add],
-                                                                       update_fulfill_htlcs: Vec::new(),
-                                                                       update_fail_htlcs: Vec::new(),
-                                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                                       update_fee: None,
-                                                                       commitment_signed,
-                                                               },
-                                                       });
-                                               },
-                                               None => { },
-                                       }
-                               } else {
-                                       // The channel was likely removed after we fetched the id from the
-                                       // `short_to_chan_info` map, but before we successfully locked the
-                                       // `channel_by_id` map.
-                                       // This can occur as no consistency guarantees exists between the two maps.
-                                       // This can occur as no consistency guarantees exist between the two maps.
+                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id: path.first().unwrap().pubkey,
+                                                       updates: msgs::CommitmentUpdate {
+                                                               update_add_htlcs: vec![update_add],
+                                                               update_fulfill_htlcs: Vec::new(),
+                                                               update_fail_htlcs: Vec::new(),
+                                                               update_fail_malformed_htlcs: Vec::new(),
+                                                               update_fee: None,
+                                                               commitment_signed,
+                                                       },
+                                               });
+                                       },
+                                       None => { },
                                }
-                       } else { return Err(APIError::InvalidRoute{err: "No peer matching the path's first hop found!" })}
+                       } else {
+                               // The channel was likely removed after we fetched the id from the
+                               // `short_to_chan_info` map, but before we successfully locked the
+                               // `channel_by_id` map.
+                               // This can occur as no consistency guarantees exist between the two maps.
+                               return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
+                       }
                        return Ok(());
                };
 
@@ -2533,26 +2559,28 @@ where
        fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<K::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
                &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
        ) -> Result<(), APIError> {
+               let mut channel_state = self.channel_state.lock().unwrap();
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })
+               }
+
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
                let (chan, msg) = {
                        let (res, chan) = {
-                               let per_peer_state = self.per_peer_state.read().unwrap();
-                               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                                       let peer_state = &mut *peer_state_lock;
-                                       match peer_state.channel_by_id.remove(temporary_channel_id) {
-                                               Some(mut chan) => {
-                                                       let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
-                                                       (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
-                                                               .map_err(|e| if let ChannelError::Close(msg) = e {
-                                                                       MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
-                                                               } else { unreachable!(); })
-                                                       , chan)
-                                               },
-                                               None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
-                                       }
-                               } else {
-                                       return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) })
+                               match peer_state.channel_by_id.remove(temporary_channel_id) {
+                                       Some(mut chan) => {
+                                               let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+                                               (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+                                                       .map_err(|e| if let ChannelError::Close(msg) = e {
+                                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+                                                       } else { unreachable!(); })
+                                               , chan)
+                                       },
+                                       None => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }) },
                                }
                        };
                        match handle_error!(self, res, chan.get_counterparty_node_id()) {
@@ -2565,29 +2593,23 @@ where
                        }
                };
 
-               let mut channel_state = self.channel_state.lock().unwrap();
-               channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+               peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
                        node_id: chan.get_counterparty_node_id(),
                        msg,
                });
                mem::drop(channel_state);
-               let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(chan.channel_id()) {
-                               hash_map::Entry::Occupied(_) => {
-                                       panic!("Generated duplicate funding txid?");
-                               },
-                               hash_map::Entry::Vacant(e) => {
-                                       let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                                       if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
-                                               panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
-                                       }
-                                       e.insert(chan);
+               match peer_state.channel_by_id.entry(chan.channel_id()) {
+                       hash_map::Entry::Occupied(_) => {
+                               panic!("Generated duplicate funding txid?");
+                       },
+                       hash_map::Entry::Vacant(e) => {
+                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
+                               if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+                                       panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
+                               e.insert(chan);
                        }
-               } else { return Err(APIError::ChannelUnavailable { err: format!("Peer with counterparty_node_id {} disconnected and closed the channel", counterparty_node_id) }) }
+               }
                Ok(())
        }
 
@@ -2710,36 +2732,34 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
                        &self.total_consistency_lock, &self.persistence_notifier,
                );
-               {
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               for channel_id in channel_ids {
-                                       if !peer_state.channel_by_id.contains_key(channel_id) {
-                                               return Err(APIError::ChannelUnavailable {
-                                                       err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)),
-                                               });
-                                       }
-                               }
-                               for channel_id in channel_ids {
-                                       let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
-                                       if !channel.update_config(config) {
-                                               continue;
-                                       }
-                                       if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
-                                       } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                       node_id: channel.get_counterparty_node_id(),
-                                                       msg,
-                                               });
-                                       }
-                               }
-                       } else {
-                               return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) });
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
+               let channel_state = &mut *channel_state_lock;
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) });
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               for channel_id in channel_ids {
+                       if !peer_state.channel_by_id.contains_key(channel_id) {
+                               return Err(APIError::ChannelUnavailable {
+                                       err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
+                               });
+                       }
+               }
+               for channel_id in channel_ids {
+                       let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
+                       if !channel.update_config(config) {
+                               continue;
+                       }
+                       if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                       } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                       node_id: channel.get_counterparty_node_id(),
+                                       msg,
+                               });
                        }
                }
                Ok(())
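
With events now queued per peer (as in the `update_channel_config` hunk above), a test that drains events from several peers can no longer assume a fixed global order. The stand-alone sketch below, with toy event types rather than the crate's real `MessageSendEvent` or the functional-test helpers, shows the pattern of locating an event by destination `node_id` instead of by index:

```rust
// Toy event type; the real tests match on the crate's MessageSendEvent variants.
#[derive(Debug)]
enum MessageSendEvent {
    UpdateHTLCs { node_id: u8 },
    SendShutdown { node_id: u8 },
}

/// Remove and return the first queued event addressed to `node_id`,
/// panicking if there is none. Tests match by destination, not by index.
fn take_event_for(events: &mut Vec<MessageSendEvent>, node_id: u8) -> MessageSendEvent {
    let pos = events
        .iter()
        .position(|e| match e {
            MessageSendEvent::UpdateHTLCs { node_id: id }
            | MessageSendEvent::SendShutdown { node_id: id } => *id == node_id,
        })
        .expect("no pending event for the requested peer");
    events.remove(pos)
}

fn main() {
    // The same two events may be drained in either order depending on which
    // peer's queue the HashMap yields first.
    let mut events = vec![
        MessageSendEvent::SendShutdown { node_id: 2 },
        MessageSendEvent::UpdateHTLCs { node_id: 1 },
    ];
    let for_node_1 = take_event_for(&mut events, 1);
    assert!(matches!(for_node_1, MessageSendEvent::UpdateHTLCs { node_id: 1 }));
    assert_eq!(events.len(), 1);
}
```
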
@@ -2785,11 +2805,11 @@ where
                                                chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
                                        },
                                        None => return Err(APIError::ChannelUnavailable {
-                                               err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id))
+                                               err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
                                        })
                                }
                        } else {
-                               return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", next_node_id) });
+                               return Err(APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) });
                        }
                };
 
@@ -2968,12 +2988,12 @@ where
                                                }
                                        };
                                        let per_peer_state = self.per_peer_state.read().unwrap();
-                                       if let None = per_peer_state.get(&counterparty_node_id) {
+                                       let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+                                       if let None = peer_state_mutex_opt {
                                                forwarding_channel_not_found!();
                                                continue;
                                        }
-                                       let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap();
-                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                        let peer_state = &mut *peer_state_lock;
                                        match peer_state.channel_by_id.entry(forward_chan_id) {
                                                hash_map::Entry::Vacant(_) => {
@@ -3360,11 +3380,11 @@ where
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
-                               let pending_msg_events = &mut channel_state.pending_msg_events;
                                let per_peer_state = self.per_peer_state.read().unwrap();
                                for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                        let peer_state = &mut *peer_state_lock;
+                                       let pending_msg_events = &mut peer_state.pending_msg_events;
                                        peer_state.channel_by_id.retain(|chan_id, chan| {
                                                let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
                                                if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
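
The timer-tick hunk above switches broadcast-style passes to iterating `per_peer_state` and borrowing each peer's own `pending_msg_events`. A simplified sketch of draining every peer's queue follows; because the peers live in a `HashMap`, the relative order of different peers' events in the combined vector is unspecified:

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState {
    pending_msg_events: Vec<String>,
}

// Drain every peer's queue into one vector. The relative order of *different
// peers'* events depends on HashMap iteration order, which is unspecified.
fn get_and_clear_all_events(
    per_peer_state: &RwLock<HashMap<u8, Mutex<PeerState>>>,
) -> Vec<String> {
    let mut all = Vec::new();
    let map = per_peer_state.read().unwrap();
    for (_node_id, peer_state_mutex) in map.iter() {
        let mut peer_state = peer_state_mutex.lock().unwrap();
        all.append(&mut peer_state.pending_msg_events);
    }
    all
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(1u8, Mutex::new(PeerState {
        pending_msg_events: vec!["BroadcastChannelUpdate".to_string()],
    }));
    peers.insert(2u8, Mutex::new(PeerState {
        pending_msg_events: vec!["SendChannelUpdate".to_string()],
    }));
    let per_peer_state = RwLock::new(peers);
    let events = get_and_clear_all_events(&per_peer_state);
    // Two events total, but which peer's event comes first is not guaranteed.
    assert_eq!(events.len(), 2);
}
```
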
@@ -3805,7 +3825,8 @@ where
                }  else { (false, None) };
 
                if found_channel {
-                       if let hash_map::Entry::Occupied(mut chan) = peer_state_opt.as_mut().unwrap().channel_by_id.entry(chan_id) {
+                       let peer_state = &mut *peer_state_opt.as_mut().unwrap();
+                       if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
                                let counterparty_node_id = chan.get().get_counterparty_node_id();
                                match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                        Ok(msgs_monitor_option) => {
@@ -3827,8 +3848,8 @@ where
                                                        if let Some((msg, commitment_signed)) = msgs {
                                                                log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
                                                                        log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                       node_id: counterparty_node_id,
                                                                        updates: msgs::CommitmentUpdate {
                                                                                update_add_htlcs: Vec::new(),
                                                                                update_fulfill_htlcs: vec![msg],
@@ -4053,15 +4074,15 @@ where
                        };
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut peer_state_lock;
+                       let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+                       if let None = peer_state_mutex_opt { return }
+                       peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
                        let mut channel = {
-                               if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-                                       peer_state_lock = peer_state_mutex.lock().unwrap();
-                                       let peer_state = &mut *peer_state_lock;
-                                       match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
-                                               hash_map::Entry::Occupied(chan) => chan,
-                                               hash_map::Entry::Vacant(_) => return,
-                                       }
-                               } else { return }
+                               match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
+                                       hash_map::Entry::Occupied(chan) => chan,
+                                       hash_map::Entry::Vacant(_) => return,
+                               }
                        };
                        if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
                                return;
@@ -4081,9 +4102,9 @@ where
                                        })
                                } else { None }
                        } else { None };
-                       htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+                       htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
                        if let Some(upd) = channel_update {
-                               channel_state.pending_msg_events.push(upd);
+                               peer_state.pending_msg_events.push(upd);
                        }
 
                        (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
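
The hunks above and below all follow the same shape once `pending_msg_events` lives on `PeerState`: look the peer up in `per_peer_state`, take its mutex, and push the `MessageSendEvent` under that lock. A minimal, self-contained sketch of that queueing pattern (the types, the `u64` key, and `queue_to_peer` are simplified stand-ins, not the real `ChannelManager` API):

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Stand-ins for PeerState / MessageSendEvent, only to show the locking shape.
struct PeerState { pending_msg_events: Vec<String> }

fn queue_to_peer(per_peer_state: &RwLock<HashMap<u64, Mutex<PeerState>>>, node_id: u64, msg: String) {
    let peers = per_peer_state.read().unwrap();
    if let Some(peer_state_mutex) = peers.get(&node_id) {
        // Queued under the per-peer lock, so this peer's messages keep their generation order.
        peer_state_mutex.lock().unwrap().pending_msg_events.push(msg);
    }
}

fn main() {
    let peers = RwLock::new(HashMap::from([(1u64, Mutex::new(PeerState { pending_msg_events: Vec::new() }))]));
    queue_to_peer(&peers, 1, "channel_update".to_owned());
    assert_eq!(peers.read().unwrap()[&1].lock().unwrap().pending_msg_events.len(), 1);
}
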
@@ -4146,42 +4167,39 @@ where
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut channel) => {
-                                       if !channel.get().inbound_is_awaiting_accept() {
-                                               return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
-                                       }
-                                       if *counterparty_node_id != channel.get().get_counterparty_node_id() {
-                                               return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
-                                       }
-                                       if accept_0conf {
-                                               channel.get_mut().set_0conf();
-                                       } else if channel.get().get_channel_type().requires_zero_conf() {
-                                               let send_msg_err_event = events::MessageSendEvent::HandleError {
-                                                       node_id: channel.get().get_counterparty_node_id(),
-                                                       action: msgs::ErrorAction::SendErrorMessage{
-                                                               msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
-                                                       }
-                                               };
-                                               channel_state.pending_msg_events.push(send_msg_err_event);
-                                               let _ = remove_channel!(self, channel);
-                                               return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
-                                       }
-
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                                               node_id: channel.get().get_counterparty_node_id(),
-                                               msg: channel.get_mut().accept_inbound_channel(user_channel_id),
-                                       });
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) });
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
+                       hash_map::Entry::Occupied(mut channel) => {
+                               if !channel.get().inbound_is_awaiting_accept() {
+                                       return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
                                }
-                               hash_map::Entry::Vacant(_) => {
-                                       return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() });
+                               if accept_0conf {
+                                       channel.get_mut().set_0conf();
+                               } else if channel.get().get_channel_type().requires_zero_conf() {
+                                       let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                               node_id: channel.get().get_counterparty_node_id(),
+                                               action: msgs::ErrorAction::SendErrorMessage{
+                                                       msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+                                               }
+                                       };
+                                       peer_state.pending_msg_events.push(send_msg_err_event);
+                                       let _ = remove_channel!(self, channel);
+                                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
                                }
+
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+                                       node_id: channel.get().get_counterparty_node_id(),
+                                       msg: channel.get_mut().accept_inbound_channel(user_channel_id),
+                               });
+                       }
+                       hash_map::Entry::Vacant(_) => {
+                               return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) });
                        }
-               } else {
-                       return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) });
                }
                Ok(())
        }
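
This handler, like the ones that follow, also swaps the nested `if let Some(peer_state_mutex) = per_peer_state.get(..)` block for an early return when the peer is unknown, which keeps the channel handling at a single indentation level. A hedged sketch of just that control-flow shape, with simplified key/value types and a hypothetical `with_peer_channel` helper:

use std::collections::HashMap;
use std::sync::Mutex;

// Simplified stand-ins; the real map is keyed by PublicKey and holds Channel objects.
struct PeerState { channel_by_id: HashMap<[u8; 32], String> }

fn with_peer_channel(
    per_peer_state: &HashMap<u64, Mutex<PeerState>>, node_id: u64, channel_id: [u8; 32],
) -> Result<(), String> {
    // Early return keeps the channel handling below un-nested.
    let peer_state_mutex_opt = per_peer_state.get(&node_id);
    if peer_state_mutex_opt.is_none() {
        return Err(format!("Can't find a peer matching the passed counterparty node_id {}", node_id));
    }
    let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
    match peer_state.channel_by_id.get_mut(&channel_id) {
        Some(_chan) => { /* operate on the channel here */ Ok(()) }
        None => Err("No such channel for the passed counterparty node_id".to_owned()),
    }
}

fn main() {
    let peers: HashMap<u64, Mutex<PeerState>> = HashMap::new();
    assert!(with_peer_channel(&peers, 7, [0u8; 32]).is_err());
}
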
@@ -4213,41 +4231,41 @@ where
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(channel.channel_id()) {
-                               hash_map::Entry::Occupied(_) => {
-                                       self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
-                               },
-                               hash_map::Entry::Vacant(entry) => {
-                                       if !self.default_configuration.manually_accept_inbound_channels {
-                                               if channel.get_channel_type().requires_zero_conf() {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
-                                               }
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg: channel.accept_inbound_channel(user_channel_id),
-                                               });
-                                       } else {
-                                               let mut pending_events = self.pending_events.lock().unwrap();
-                                               pending_events.push(
-                                                       events::Event::OpenChannelRequest {
-                                                               temporary_channel_id: msg.temporary_channel_id.clone(),
-                                                               counterparty_node_id: counterparty_node_id.clone(),
-                                                               funding_satoshis: msg.funding_satoshis,
-                                                               push_msat: msg.push_msat,
-                                                               channel_type: channel.get_channel_type().clone(),
-                                                       }
-                                               );
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(channel.channel_id()) {
+                       hash_map::Entry::Occupied(_) => {
+                               self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
+                       },
+                       hash_map::Entry::Vacant(entry) => {
+                               if !self.default_configuration.manually_accept_inbound_channels {
+                                       if channel.get_channel_type().requires_zero_conf() {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
                                        }
-
-                                       entry.insert(channel);
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+                                               node_id: counterparty_node_id.clone(),
+                                               msg: channel.accept_inbound_channel(user_channel_id),
+                                       });
+                               } else {
+                                       let mut pending_events = self.pending_events.lock().unwrap();
+                                       pending_events.push(
+                                               events::Event::OpenChannelRequest {
+                                                       temporary_channel_id: msg.temporary_channel_id.clone(),
+                                                       counterparty_node_id: counterparty_node_id.clone(),
+                                                       funding_satoshis: msg.funding_satoshis,
+                                                       push_msat: msg.push_msat,
+                                                       channel_type: channel.get_channel_type().clone(),
+                                               }
+                                       );
                                }
+
+                               entry.insert(channel);
                        }
-               } else {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))
                }
                Ok(())
        }
@@ -4255,21 +4273,18 @@ where
        fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
                let (value, output_script, user_id) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
-                                       hash_map::Entry::Occupied(mut chan) => {
-                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
-                                               }
-                                               try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan);
-                                               (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
-                                       },
-                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
-                               }
-                       } else {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id))
+                       }
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan);
+                                       (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
@@ -4287,22 +4302,19 @@ where
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let per_peer_state = self.per_peer_state.read().unwrap();
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id))
+               }
                let ((funding_msg, monitor, mut channel_ready), mut chan) = {
                        let best_block = *self.best_block.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
-                                       hash_map::Entry::Occupied(mut chan) => {
-                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
-                                               }
-                                               (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove())
-                                       },
-                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
-                               }
-                       } else {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove())
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
                };
                // Because we have exclusive ownership of the channel here we can release the peer_state
@@ -4333,8 +4345,7 @@ where
                // It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
                // peer exists, despite the inner PeerState potentially having no channels after removing
                // the channel above.
-               let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap();
-               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(funding_msg.channel_id) {
                        hash_map::Entry::Occupied(_) => {
@@ -4352,12 +4363,12 @@ where
                                                i_e.insert(chan.get_counterparty_node_id());
                                        }
                                }
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
                                if let Some(msg) = channel_ready {
-                                       send_channel_ready!(self, channel_state.pending_msg_events, chan, msg);
+                                       send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
                                }
                                e.insert(chan);
                        }
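
`internal_funding_created` above releases the per-peer lock (per the comment about exclusive channel ownership) and then re-locks it from the same `peer_state_mutex_opt` to queue `SendFundingSigned`, relying on the outer `per_peer_state` read lock to keep the peer entry alive, as the "safe to unwrap" comment notes. A rough sketch of that drop-and-relock shape (simplified types; `fund_channel` and `watch_channel_stub` are hypothetical stand-ins):

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState { pending_msg_events: Vec<&'static str> }

// Placeholder for work done without the per-peer lock held (e.g. monitor registration).
fn watch_channel_stub() {}

fn fund_channel(per_peer_state: &RwLock<HashMap<u64, Mutex<PeerState>>>, node_id: u64) {
    let peers = per_peer_state.read().unwrap();
    let peer_state_mutex_opt = peers.get(&node_id);
    if peer_state_mutex_opt.is_none() { return }

    {
        let _peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
        // ... take the channel out of channel_by_id here, then let the lock drop ...
    }
    watch_channel_stub();

    // Re-locking and unwrapping is fine: the per_peer_state read lock is still held,
    // so the peer entry cannot have been removed in the meantime.
    let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
    peer_state.pending_msg_events.push("funding_signed");
}

fn main() {
    let peers = RwLock::new(HashMap::from([(1u64, Mutex::new(PeerState { pending_msg_events: Vec::new() }))]));
    fund_channel(&peers, 1);
    assert_eq!(peers.read().unwrap()[&1].lock().unwrap().pending_msg_events, vec!["funding_signed"]);
}
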
@@ -4371,42 +4382,40 @@ where
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(msg.channel_id) {
-                                       hash_map::Entry::Occupied(mut chan) => {
-                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                               }
-                                               let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) {
-                                                       Ok(update) => update,
-                                                       Err(e) => try_chan_entry!(self, Err(e), chan),
-                                               };
-                                               match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                                       ChannelMonitorUpdateStatus::Completed => {},
-                                                       e => {
-                                                               let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-                                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
-                                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
-                                                                       // monitor update contained within `shutdown_finish` was applied.
-                                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                                                               shutdown_finish.0.take();
-                                                                       }
+                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+                       }
+
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) {
+                                               Ok(update) => update,
+                                               Err(e) => try_chan_entry!(self, Err(e), chan),
+                                       };
+                                       match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+                                               ChannelMonitorUpdateStatus::Completed => {},
+                                               e => {
+                                                       let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+                                                       if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                               // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                               // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                               // monitor update contained within `shutdown_finish` was applied.
+                                                               if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                                       shutdown_finish.0.take();
                                                                }
-                                                               return res
-                                                       },
-                                               }
-                                               if let Some(msg) = channel_ready {
-                                                       send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
-                                               }
-                                               funding_tx
-                                       },
-                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                               }
-                       } else {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                                                       }
+                                                       return res
+                                               },
+                                       }
+                                       if let Some(msg) = channel_ready {
+                                               send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg);
+                                       }
+                                       funding_tx
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
@@ -4418,45 +4427,42 @@ where
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
-                                               self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan);
-                                       if let Some(announcement_sigs) = announcement_sigs_opt {
-                                               log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg: announcement_sigs,
-                                               });
-                                       } else if chan.get().is_usable() {
-                                               // If we're sending an announcement_signatures, we'll send the (public)
-                                               // channel_update after sending a channel_announcement when we receive our
-                                               // counterparty's announcement_signatures. Thus, we only bother to send a
-                                               // channel_update here if the channel is not public, i.e. we're not sending an
-                                               // announcement_signatures.
-                                               log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
-                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                               node_id: counterparty_node_id.clone(),
-                                                               msg,
-                                                       });
-                                               }
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
+                                       self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan);
+                               if let Some(announcement_sigs) = announcement_sigs_opt {
+                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                                               node_id: counterparty_node_id.clone(),
+                                               msg: announcement_sigs,
+                                       });
+                               } else if chan.get().is_usable() {
+                                       // If we're sending an announcement_signatures, we'll send the (public)
+                                       // channel_update after sending a channel_announcement when we receive our
+                                       // counterparty's announcement_signatures. Thus, we only bother to send a
+                                       // channel_update here if the channel is not public, i.e. we're not sending an
+                                       // announcement_signatures.
+                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: counterparty_node_id.clone(),
+                                                       msg,
+                                               });
                                        }
+                               }
 
-                                       emit_channel_ready_event!(self, chan.get_mut());
+                               emit_channel_ready_event!(self, chan.get_mut());
 
-                                       Ok(())
-                               },
-                               hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                       }
-               } else {
-                       Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               Ok(())
+                       },
+                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
        }
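
After the move, `internal_channel_ready` queues either `SendAnnouncementSignatures` or, for a usable but unannounced channel, an immediate `SendChannelUpdate` onto the peer's queue. A small sketch of just that branching decision, using hypothetical stand-in types (`ReadyFollowUp`, `follow_up`):

// Hypothetical stand-in; the real handler pushes events::MessageSendEvent values instead.
enum ReadyFollowUp { AnnouncementSignatures, PrivateChannelUpdate, Nothing }

fn follow_up(announcement_sigs: Option<()>, channel_is_usable: bool) -> ReadyFollowUp {
    if announcement_sigs.is_some() {
        // Public channel: the channel_update goes out later, after the
        // channel_announcement exchange completes.
        ReadyFollowUp::AnnouncementSignatures
    } else if channel_is_usable {
        // Unannounced channel: queue the unicast channel_update right away.
        ReadyFollowUp::PrivateChannelUpdate
    } else {
        ReadyFollowUp::Nothing
    }
}

fn main() {
    assert!(matches!(follow_up(None, true), ReadyFollowUp::PrivateChannelUpdate));
}
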
 
@@ -4466,48 +4472,45 @@ where
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-                                       hash_map::Entry::Occupied(mut chan_entry) => {
-                                               if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                               }
+                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+                       }
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+                               hash_map::Entry::Occupied(mut chan_entry) => {
 
-                                               if !chan_entry.get().received_shutdown() {
-                                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
-                                                               log_bytes!(msg.channel_id),
-                                                               if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
-                                               }
+                                       if !chan_entry.get().received_shutdown() {
+                                               log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                       log_bytes!(msg.channel_id),
+                                                       if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+                                       }
 
-                                               let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
-                                               dropped_htlcs = htlcs;
-
-                                               // Update the monitor with the shutdown script if necessary.
-                                               if let Some(monitor_update) = monitor_update {
-                                                       let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, chan_entry);
-                                                               break result;
-                                                       }
+                                       let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
+                                       dropped_htlcs = htlcs;
+
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update {
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, chan_entry);
+                                                       break result;
                                                }
+                                       }
 
-                                               if let Some(msg) = shutdown {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                               node_id: *counterparty_node_id,
-                                                               msg,
-                                                       });
-                                               }
+                                       if let Some(msg) = shutdown {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                       node_id: *counterparty_node_id,
+                                                       msg,
+                                               });
+                                       }
 
-                                               break Ok(());
-                                       },
-                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                               }
-                       } else {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                                       break Ok(());
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
@@ -4521,38 +4524,33 @@ where
        }
 
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+               }
                let (tx, chan_option) = {
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-                                       hash_map::Entry::Occupied(mut chan_entry) => {
-                                               if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                               }
-                                               let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
-                                               if let Some(msg) = closing_signed {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                               node_id: counterparty_node_id.clone(),
-                                                               msg,
-                                                       });
-                                               }
-                                               if tx.is_some() {
-                                                       // We're done with this channel, we've got a signed closing transaction and
-                                                       // will send the closing_signed back to the remote peer upon return. This
-                                                       // also implies there are no pending HTLCs left on the channel, so we can
-                                                       // fully delete it from tracking (the channel monitor is still around to
-                                                       // watch for old state broadcasts)!
-                                                       (tx, Some(remove_channel!(self, chan_entry)))
-                                               } else { (tx, None) }
-                                       },
-                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                               }
-                       } else {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+                               hash_map::Entry::Occupied(mut chan_entry) => {
+                                       let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
+                                       if let Some(msg) = closing_signed {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                       node_id: counterparty_node_id.clone(),
+                                                       msg,
+                                               });
+                                       }
+                                       if tx.is_some() {
+                                               // We're done with this channel, we've got a signed closing transaction and
+                                               // will send the closing_signed back to the remote peer upon return. This
+                                               // also implies there are no pending HTLCs left on the channel, so we can
+                                               // fully delete it from tracking (the channel monitor is still around to
+                                               // watch for old state broadcasts)!
+                                               (tx, Some(remove_channel!(self, chan_entry)))
+                                       } else { (tx, None) }
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                if let Some(broadcast_tx) = tx {
@@ -4561,8 +4559,9 @@ where
                }
                if let Some(chan) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                               let mut channel_state = self.channel_state.lock().unwrap();
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
                                });
                        }
@@ -4583,43 +4582,40 @@ where
 
                let pending_forward_info = self.decode_update_add_htlc_onion(msg);
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+
+                               let create_pending_htlc_status = |chan: &Channel<<K::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                                       // If the update_add is completely bogus, the call will Err and we will close,
+                                       // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+                                       // want to reject the new HTLC and fail it backwards instead of forwarding.
+                                       match pending_forward_info {
+                                               PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+                                                       let reason = if (error_code & 0x1000) != 0 {
+                                                               let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+                                                               HTLCFailReason::reason(real_code, error_data)
+                                                       } else {
+                                                               HTLCFailReason::from_failure_code(error_code)
+                                                       }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+                                                       let msg = msgs::UpdateFailHTLC {
+                                                               channel_id: msg.channel_id,
+                                                               htlc_id: msg.htlc_id,
+                                                               reason
+                                                       };
+                                                       PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+                                               },
+                                               _ => pending_forward_info
                                        }
-
-                                       let create_pending_htlc_status = |chan: &Channel<<K::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
-                                               // If the update_add is completely bogus, the call will Err and we will close,
-                                               // but if we've sent a shutdown and they haven't acknowledged it yet, we just
-                                               // want to reject the new HTLC and fail it backwards instead of forwarding.
-                                               match pending_forward_info {
-                                                       PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                               let reason = if (error_code & 0x1000) != 0 {
-                                                                       let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-                                                                       HTLCFailReason::reason(real_code, error_data)
-                                                               } else {
-                                                                       HTLCFailReason::from_failure_code(error_code)
-                                                               }.get_encrypted_failure_packet(incoming_shared_secret, &None);
-                                                               let msg = msgs::UpdateFailHTLC {
-                                                                       channel_id: msg.channel_id,
-                                                                       htlc_id: msg.htlc_id,
-                                                                       reason
-                                                               };
-                                                               PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-                                                       },
-                                                       _ => pending_forward_info
-                                               }
-                                       };
-                                       try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                       }
-               } else {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               };
+                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                Ok(())
        }
@@ -4628,20 +4624,17 @@ where
                let channel_lock = self.channel_state.lock().unwrap();
                let (htlc_source, forwarded_htlc_value) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let None = per_peer_state.get(counterparty_node_id) {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
                        }
-                       let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap();
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
                                        try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
@@ -4650,45 +4643,39 @@ where
 
        fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                       }
-               } else {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                Ok(())
        }
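
The handlers in this hunk all follow the same new shape: resolve the peer from `per_peer_state` first, then look the channel up in that peer's `channel_by_id`, which is why the old per-channel `get_counterparty_node_id()` check becomes redundant. A minimal sketch of that shape, with simplified stand-in types (none of the names below are the real lightning types):

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    // Simplified stand-ins for illustration only; not the real lightning types.
    struct PeerState {
        channel_by_id: HashMap<[u8; 32], String>,
        pending_msg_events: Vec<String>,
    }
    type PerPeerState = RwLock<HashMap<[u8; 33], Mutex<PeerState>>>;

    fn handle_update_for_peer(
        per_peer_state: &PerPeerState,
        counterparty_node_id: &[u8; 33],
        channel_id: &[u8; 32],
    ) -> Result<(), String> {
        let peers = per_peer_state.read().unwrap();
        // Unknown peer: error out without touching any channel map.
        let peer_state_mutex = peers.get(counterparty_node_id)
            .ok_or_else(|| "Can't find a peer matching the passed counterparty node_id".to_owned())?;
        let mut peer_state = peer_state_mutex.lock().unwrap();
        // The channel map is already scoped to this peer, so a hit here implies
        // the message really did come from the channel's counterparty.
        match peer_state.channel_by_id.get_mut(channel_id) {
            Some(_chan) => Ok(()), // apply the update and queue any replies on peer_state.pending_msg_events
            None => Err("No such channel for the passed counterparty_node_id".to_owned()),
        }
    }

Replies queued under that same peer lock are what preserve the per-peer ordering guarantee documented later in this diff.
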
 
        fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       if (msg.failure_code & 0x8000) == 0 {
-                                               let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
-                                               try_chan_entry!(self, Err(chan_err), chan);
-                                       }
-                                       try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
-                                       Ok(())
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                       }
-               } else {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               if (msg.failure_code & 0x8000) == 0 {
+                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+                                       try_chan_entry!(self, Err(chan_err), chan);
+                               }
+                               try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
+                               Ok(())
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
        }
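
For context on the `msg.failure_code & 0x8000` check kept above: BOLT 4 failure codes are flag bits, and `update_fail_malformed_htlc` is only meaningful when the BADONION bit is set, hence the channel close otherwise. An illustrative sketch (the constant names are ours, not identifiers from this codebase):

    // BOLT 4 failure-code flag bits (illustrative constants).
    const BADONION: u16 = 0x8000; // the reporting node could not parse the onion
    const PERM: u16 = 0x4000;     // permanent failure
    const UPDATE: u16 = 0x1000;   // the failure carries a channel_update

    // Mirrors the check in the handler above: a malformed-HTLC failure without
    // BADONION set is a protocol violation, so the channel gets closed.
    fn malformed_code_is_acceptable(failure_code: u16) -> bool {
        failure_code & BADONION != 0
    }

    #[test]
    fn badonion_must_be_set() {
        assert!(malformed_code_is_acceptable(BADONION | PERM));
        assert!(!malformed_code_is_acceptable(UPDATE));
    }
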
 
@@ -4696,53 +4683,50 @@ where
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       let (revoke_and_ack, commitment_signed, monitor_update) =
-                                               match chan.get_mut().commitment_signed(&msg, &self.logger) {
-                                                       Err((None, e)) => try_chan_entry!(self, Err(e), chan),
-                                                       Err((Some(update), e)) => {
-                                                               assert!(chan.get().is_awaiting_monitor_update());
-                                                               let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
-                                                               try_chan_entry!(self, Err(e), chan);
-                                                               unreachable!();
-                                                       },
-                                                       Ok(res) => res
-                                               };
-                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
-                                       if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
-                                               return Err(e);
-                                       }
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               let (revoke_and_ack, commitment_signed, monitor_update) =
+                                       match chan.get_mut().commitment_signed(&msg, &self.logger) {
+                                               Err((None, e)) => try_chan_entry!(self, Err(e), chan),
+                                               Err((Some(update), e)) => {
+                                                       assert!(chan.get().is_awaiting_monitor_update());
+                                                       let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
+                                                       try_chan_entry!(self, Err(e), chan);
+                                                       unreachable!();
+                                               },
+                                               Ok(res) => res
+                                       };
+                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+                                       return Err(e);
+                               }
 
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+                                       node_id: counterparty_node_id.clone(),
+                                       msg: revoke_and_ack,
+                               });
+                               if let Some(msg) = commitment_signed {
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                node_id: counterparty_node_id.clone(),
-                                               msg: revoke_and_ack,
+                                               updates: msgs::CommitmentUpdate {
+                                                       update_add_htlcs: Vec::new(),
+                                                       update_fulfill_htlcs: Vec::new(),
+                                                       update_fail_htlcs: Vec::new(),
+                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                       update_fee: None,
+                                                       commitment_signed: msg,
+                                               },
                                        });
-                                       if let Some(msg) = commitment_signed {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: Vec::new(),
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed: msg,
-                                                       },
-                                               });
-                                       }
-                                       Ok(())
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                       }
-               } else {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               }
+                               Ok(())
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
        }
 
@@ -4844,53 +4828,50 @@ where
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(msg.channel_id) {
-                                       hash_map::Entry::Occupied(mut chan) => {
-                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                                       break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                               }
-                                               let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
-                                               let raa_updates = break_chan_entry!(self,
-                                                       chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
-                                               htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-                                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
-                                               if was_paused_for_mon_update {
-                                                       assert!(update_res != ChannelMonitorUpdateStatus::Completed);
-                                                       assert!(raa_updates.commitment_update.is_none());
-                                                       assert!(raa_updates.accepted_htlcs.is_empty());
-                                                       assert!(raa_updates.failed_htlcs.is_empty());
-                                                       assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-                                                       break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
-                                               }
-                                               if update_res != ChannelMonitorUpdateStatus::Completed {
-                                                       if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
-                                                                       RAACommitmentOrder::CommitmentFirst, false,
-                                                                       raa_updates.commitment_update.is_some(), false,
-                                                                       raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                                       raa_updates.finalized_claimed_htlcs) {
-                                                               break Err(e);
-                                                       } else { unreachable!(); }
-                                               }
-                                               if let Some(updates) = raa_updates.commitment_update {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                               node_id: counterparty_node_id.clone(),
-                                                               updates,
-                                                       });
-                                               }
-                                               break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                               raa_updates.finalized_claimed_htlcs,
-                                                               chan.get().get_short_channel_id()
-                                                                       .unwrap_or(chan.get().outbound_scid_alias()),
-                                                               chan.get().get_funding_txo().unwrap(),
-                                                               chan.get().get_user_id()))
-                                       },
-                                       hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                               }
-                       } else {
-                               break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))
+                       }
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
+                                       let raa_updates = break_chan_entry!(self,
+                                               chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+                                       htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
+                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
+                                       if was_paused_for_mon_update {
+                                               assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+                                               assert!(raa_updates.commitment_update.is_none());
+                                               assert!(raa_updates.accepted_htlcs.is_empty());
+                                               assert!(raa_updates.failed_htlcs.is_empty());
+                                               assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+                                               break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+                                       }
+                                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false,
+                                                               raa_updates.commitment_update.is_some(), false,
+                                                               raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                               raa_updates.finalized_claimed_htlcs) {
+                                                       break Err(e);
+                                               } else { unreachable!(); }
+                                       }
+                                       if let Some(updates) = raa_updates.commitment_update {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id: counterparty_node_id.clone(),
+                                                       updates,
+                                               });
+                                       }
+                                       break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                       raa_updates.finalized_claimed_htlcs,
+                                                       chan.get().get_short_channel_id()
+                                                               .unwrap_or(chan.get().outbound_scid_alias()),
+                                                       chan.get().get_funding_txo().unwrap(),
+                                                       chan.get().get_user_id()))
+                               },
+                               hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
@@ -4912,20 +4893,17 @@ where
 
        fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                       }
-               } else {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                Ok(())
        }
@@ -4934,30 +4912,27 @@ where
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                       }
-                                       if !chan.get().is_usable() {
-                                               return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
-                                       }
+               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               if !chan.get().is_usable() {
+                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+                               }
 
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                               msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
-                                                       self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
-                                               // Note that announcement_signatures fails if the channel cannot be announced,
-                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
-                                               update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
-                                       });
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                       }
-               } else {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                       msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
+                                               self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
+                                       // Note that announcement_signatures fails if the channel cannot be announced,
+                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                       update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
+                               });
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                Ok(())
        }
@@ -4972,33 +4947,33 @@ where
                        }
                };
                let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(&chan_counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(chan_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                               if chan.get().should_announce() {
-                                                       // If the announcement is about a channel of ours which is public, some
-                                                       // other peer may simply be forwarding all its gossip to us. Don't provide
-                                                       // a scary-looking error message and return Ok instead.
-                                                       return Ok(NotifyOption::SkipPersist);
-                                               }
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
-                                       }
-                                       let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
-                                       let msg_from_node_one = msg.contents.flags & 1 == 0;
-                                       if were_node_one == msg_from_node_one {
+               let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
+               if let None = peer_state_mutex_opt {
+                       return Ok(NotifyOption::SkipPersist)
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(chan_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                       if chan.get().should_announce() {
+                                               // If the announcement is about a channel of ours which is public, some
+                                               // other peer may simply be forwarding all its gossip to us. Don't provide
+                                               // a scary-looking error message and return Ok instead.
                                                return Ok(NotifyOption::SkipPersist);
-                                       } else {
-                                               log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
-                                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
                                        }
-                               },
-                               hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
-                       }
-               } else {
-                       return Ok(NotifyOption::SkipPersist)
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+                               }
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let msg_from_node_one = msg.contents.flags & 1 == 0;
+                               if were_node_one == msg_from_node_one {
+                                       return Ok(NotifyOption::SkipPersist);
+                               } else {
+                                       log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
+                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
+                               }
+                       },
+                       hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
                }
                Ok(NotifyOption::DoPersist)
        }
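
A note on the `were_node_one == msg_from_node_one` comparison above: per BOLT 7, bit 0 of a `channel_update`'s flags is 0 when the update was signed by `node_1`, the peer with the lexicographically lesser node_id, so a match means the counterparty is echoing an update about our own side of the channel and there is nothing new to apply. A rough sketch of that check (function and parameter names are ours):

    // Sketch of the direction check in the channel_update handler above; only
    // the logic mirrors the handler, the names are illustrative.
    fn update_is_for_our_direction(our_id: &[u8; 33], their_id: &[u8; 33], flags: u8) -> bool {
        let we_are_node_one = our_id[..] < their_id[..];
        let msg_from_node_one = flags & 1 == 0;
        // Equal means the update describes the direction we ourselves sign, so
        // the handler returns NotifyOption::SkipPersist instead of applying it.
        we_are_node_one == msg_from_node_one
    }
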
@@ -5010,51 +4985,48 @@ where
                        let channel_state = &mut *channel_state_lock;
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               match peer_state.channel_by_id.entry(msg.channel_id) {
-                                       hash_map::Entry::Occupied(mut chan) => {
-                                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-                                               }
-                                               // Currently, we expect all holding cell update_adds to be dropped on peer
-                                               // disconnect, so Channel's reestablish will never hand us any holding cell
-                                               // freed HTLCs to fail backwards. If in the future we no longer drop pending
-                                               // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-                                               let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
-                                                       msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
-                                                       &*self.best_block.read().unwrap()), chan);
-                                               let mut channel_update = None;
-                                               if let Some(msg) = responses.shutdown_msg {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                               node_id: counterparty_node_id.clone(),
+                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                       if let None = peer_state_mutex_opt {
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id));
+                       }
+                       let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       match peer_state.channel_by_id.entry(msg.channel_id) {
+                               hash_map::Entry::Occupied(mut chan) => {
+                                       // Currently, we expect all holding cell update_adds to be dropped on peer
+                                       // disconnect, so Channel's reestablish will never hand us any holding cell
+                                       // freed HTLCs to fail backwards. If in the future we no longer drop pending
+                                       // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+                                       let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
+                                               msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
+                                               &*self.best_block.read().unwrap()), chan);
+                                       let mut channel_update = None;
+                                       if let Some(msg) = responses.shutdown_msg {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                       node_id: counterparty_node_id.clone(),
+                                                       msg,
+                                               });
+                                       } else if chan.get().is_usable() {
+                                               // If the channel is in a usable state (ie the channel is not being shut
+                                               // down), send a unicast channel_update to our counterparty to make sure
+                                               // they have the latest channel parameters.
+                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+                                                       channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: chan.get().get_counterparty_node_id(),
                                                                msg,
                                                        });
-                                               } else if chan.get().is_usable() {
-                                                       // If the channel is in a usable state (ie the channel is not being shut
-                                                       // down), send a unicast channel_update to our counterparty to make sure
-                                                       // they have the latest channel parameters.
-                                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-                                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                                       msg,
-                                                               });
-                                                       }
-                                               }
-                                               let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
-                                               htlc_forwards = self.handle_channel_resumption(
-                                                       &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
-                                                       Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
-                                               if let Some(upd) = channel_update {
-                                                       channel_state.pending_msg_events.push(upd);
                                                }
-                                               need_lnd_workaround
-                                       },
-                                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-                               }
-                       } else {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+                                       }
+                                       let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+                                       htlc_forwards = self.handle_channel_resumption(
+                                               &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+                                               Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+                                       if let Some(upd) = channel_update {
+                                               peer_state.pending_msg_events.push(upd);
+                                       }
+                                       need_lnd_workaround
+                               },
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
 
@@ -5103,9 +5075,9 @@ where
                                                if let Some(counterparty_node_id) = counterparty_node_id_opt {
                                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                                        if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-                                                               let pending_msg_events = &mut channel_state.pending_msg_events;
                                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                                let peer_state = &mut *peer_state_lock;
+                                                               let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
                                                                        let mut chan = remove_channel!(self, chan_entry);
                                                                        failed_channels.push(chan.force_shutdown(false));
@@ -5162,12 +5134,12 @@ where
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
+                               let pending_msg_events = &mut peer_state.pending_msg_events;
                                peer_state.channel_by_id.retain(|channel_id, chan| {
                                        match chan.maybe_free_holding_cell_htlcs(&self.logger) {
                                                Ok((commitment_opt, holding_cell_failed_htlcs)) => {
@@ -5228,12 +5200,12 @@ where
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
-                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
+                               let pending_msg_events = &mut peer_state.pending_msg_events;
                                peer_state.channel_by_id.retain(|channel_id, chan| {
                                        match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
                                                Ok((msg_opt, tx_opt)) => {
@@ -5581,6 +5553,19 @@ where
        R::Target: Router,
        L::Target: Logger,
 {
+       /// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
+       /// The returned array will contain `MessageSendEvent`s for different peers if
+       /// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer
+       /// are always placed next to each other.
+       ///
+       /// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
+       /// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
+       /// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
+       /// will randomly be placed first or last in the returned array.
+       ///
+       /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
+       /// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
+       /// the `MessageSendEvent`s to the specific peer they were generated under.
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
@@ -5600,8 +5585,16 @@ where
                        }
 
                        let mut pending_events = Vec::new();
-                       let mut channel_state = self.channel_state.lock().unwrap();
-                       mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               if peer_state.pending_msg_events.len() > 0 {
+                                       let mut peer_pending_events = Vec::new();
+                                       mem::swap(&mut peer_pending_events, &mut peer_state.pending_msg_events);
+                                       pending_events.append(&mut peer_pending_events);
+                               }
+                       }
 
                        if !pending_events.is_empty() {
                                events.replace(pending_events);
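
Given the per-peer ordering documented on `get_and_clear_pending_msg_events` above, a caller that wants per-peer batches only needs one pass over the returned `Vec`, since events for the same peer are contiguous. A generic consumer-side sketch (nothing here is from the codebase; `node_id_of` stands in for however the caller extracts a peer id, and broadcast events would need their own handling):

    // Groups contiguous events that share a key into batches; relies on the
    // per-peer contiguity guarantee described above.
    fn batch_by_peer<E, K: PartialEq>(events: Vec<E>, node_id_of: impl Fn(&E) -> K) -> Vec<(K, Vec<E>)> {
        let mut batches: Vec<(K, Vec<E>)> = Vec::new();
        for ev in events {
            let key = node_id_of(&ev);
            let start_new = match batches.last() {
                Some((k, _)) => *k != key,
                None => true,
            };
            if start_new {
                batches.push((key, Vec::new()));
            }
            batches.last_mut().unwrap().1.push(ev);
        }
        batches
    }

    #[test]
    fn contiguous_events_are_grouped() {
        let evs = vec![("node_a", 1), ("node_a", 2), ("node_b", 3)];
        let batches = batch_by_peer(evs, |e| e.0);
        assert_eq!(batches.len(), 2);
        assert_eq!(batches[0].1, vec![("node_a", 1), ("node_a", 2)]);
    }
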
@@ -5802,11 +5795,11 @@ where
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
+                               let pending_msg_events = &mut peer_state.pending_msg_events;
                                peer_state.channel_by_id.retain(|_, channel| {
                                        let res = f(channel);
                                        if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
@@ -6092,50 +6085,48 @@ where
                let mut channel_state = self.channel_state.lock().unwrap();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                {
-                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
                                log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
                        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
+                               let pending_msg_events = &mut peer_state.pending_msg_events;
                                peer_state.channel_by_id.retain(|_, chan| {
-                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
-                                               chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                               if chan.is_shutdown() {
-                                                       update_maps_on_chan_removal!(self, chan);
-                                                       self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                                       return false;
-                                               } else {
-                                                       no_channels_remain = false;
-                                               }
+                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+                                       if chan.is_shutdown() {
+                                               update_maps_on_chan_removal!(self, chan);
+                                               self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
+                                               return false;
+                                       } else {
+                                               no_channels_remain = false;
                                        }
                                        true
                                });
+                               pending_msg_events.retain(|msg| {
+                                       match msg {
+                                               &events::MessageSendEvent::SendAcceptChannel { .. } => false,
+                                               &events::MessageSendEvent::SendOpenChannel { .. } => false,
+                                               &events::MessageSendEvent::SendFundingCreated { .. } => false,
+                                               &events::MessageSendEvent::SendFundingSigned { .. } => false,
+                                               &events::MessageSendEvent::SendChannelReady { .. } => false,
+                                               &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+                                               &events::MessageSendEvent::UpdateHTLCs { .. } => false,
+                                               &events::MessageSendEvent::SendRevokeAndACK { .. } => false,
+                                               &events::MessageSendEvent::SendClosingSigned { .. } => false,
+                                               &events::MessageSendEvent::SendShutdown { .. } => false,
+                                               &events::MessageSendEvent::SendChannelReestablish { .. } => false,
+                                               &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
+                                               &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+                                               &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                               &events::MessageSendEvent::SendChannelUpdate { .. } => false,
+                                               &events::MessageSendEvent::HandleError { .. } => false,
+                                               &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
+                                               &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+                                               &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
+                                               &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
+                                       }
+                               });
                        }
-                       pending_msg_events.retain(|msg| {
-                               match msg {
-                                       &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
-                                       &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
-                                       &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
-                                       &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
-                                       &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
-                                       &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
-                               }
-                       });
                        mem::drop(channel_state);
                }
                if no_channels_remain {
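With the message queue now held per peer, the retain above no longer needs to compare each event's `node_id` against the disconnecting peer; it only decides which kinds of events survive a disconnect. A minimal sketch of that predicate as a free function, using only the `MessageSendEvent` variants listed above (illustrative, not part of this patch):

// Illustrative sketch: when a peer disconnects, only broadcast gossip stays in
// its queue; every directed message is dropped and regenerated as needed
// (e.g. via the channel_reestablish flow) once the peer reconnects.
fn keep_on_disconnect(msg: &events::MessageSendEvent) -> bool {
	matches!(msg,
		events::MessageSendEvent::BroadcastChannelAnnouncement { .. } |
		events::MessageSendEvent::BroadcastChannelUpdate { .. })
}

The patch keeps the explicit exhaustive match instead, so adding a new `MessageSendEvent` variant forces a conscious keep-or-drop decision at this call site.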
@@ -6165,6 +6156,7 @@ where
                                        e.insert(Mutex::new(PeerState {
                                                channel_by_id: HashMap::new(),
                                                latest_features: init_msg.features.clone(),
+                                               pending_msg_events: Vec::new(),
                                        }));
                                },
                                hash_map::Entry::Occupied(e) => {
@@ -6175,12 +6167,12 @@ where
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
-               let pending_msg_events = &mut channel_state.pending_msg_events;
                let per_peer_state = self.per_peer_state.read().unwrap();
 
                for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
+                       let pending_msg_events = &mut peer_state.pending_msg_events;
                        peer_state.channel_by_id.retain(|_, chan| {
                                let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
                                        if !chan.have_received_message() {
@@ -6218,33 +6210,36 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                if msg.channel_id == [0; 32] {
-                       for chan in self.list_channels() {
-                               if chan.counterparty.node_id == *counterparty_node_id {
-                                       // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
-                               }
+                       let channel_ids: Vec<[u8; 32]> = {
+                               let per_peer_state = self.per_peer_state.read().unwrap();
+                               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                               if let None = peer_state_mutex_opt { return; }
+                               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               peer_state.channel_by_id.keys().cloned().collect()
+                       };
+                       for channel_id in channel_ids {
+                               // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
+                               let _ = self.force_close_channel_with_peer(&channel_id, counterparty_node_id, Some(&msg.data), true);
                        }
                } else {
                        {
                                // First check if we can advance the channel type and try again.
                                let mut channel_state = self.channel_state.lock().unwrap();
                                let per_peer_state = self.per_peer_state.read().unwrap();
-                               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                                       let peer_state = &mut *peer_state_lock;
-                                       if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
-                                               if chan.get_counterparty_node_id() != *counterparty_node_id {
-                                                       return;
-                                               }
-                                               if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
-                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-                                                               node_id: *counterparty_node_id,
-                                                               msg,
-                                                       });
-                                                       return;
-                                               }
+                               let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                               if let None = peer_state_mutex_opt { return; }
+                               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
+                                       if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+                                                       node_id: *counterparty_node_id,
+                                                       msg,
+                                               });
+                                               return;
                                        }
-                               } else { return; }
+                               }
                        }
 
                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
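For context, an `error` whose `channel_id` is all zeros refers to every channel with the sending peer (per BOLT 1), which is why the handler above first snapshots the peer's channel ids and only then force-closes each one after the short-lived per-peer lock has been released. A hedged usage sketch built from types already used in this file (the test-side closure-event bookkeeping is omitted):

// Illustrative only: an `error` carrying an all-zero channel_id asks us to
// treat every channel with that peer as errored, so each one is force-closed.
let err_msg = msgs::ErrorMessage {
	channel_id: [0; 32],
	data: "internal error".to_owned(),
};
nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msg);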
@@ -7155,6 +7150,7 @@ where
                        let peer_state = PeerState {
                                channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
                                latest_features: Readable::read(reader)?,
+                               pending_msg_events: Vec::new(),
                        };
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
                }
@@ -7488,7 +7484,6 @@ where
                        best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
 
                        channel_state: Mutex::new(ChannelHolder {
-                               pending_msg_events: Vec::new(),
                        }),
                        inbound_payment_key: expanded_inbound_key,
                        pending_inbound_payments: Mutex::new(pending_inbound_payments),
@@ -7540,17 +7535,24 @@ where
 mod tests {
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
+       use bitcoin::hashes::hex::FromHex;
+       use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+       use bitcoin::secp256k1::ecdsa::Signature;
+       use bitcoin::secp256k1::ffi::Signature as FFISignature;
+       use bitcoin::blockdata::script::Script;
+       use bitcoin::Txid;
        use core::time::Duration;
        use core::sync::atomic::Ordering;
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-       use crate::ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure};
+       use crate::ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs;
-       use crate::ln::msgs::ChannelMessageHandler;
+       use crate::ln::msgs::{ChannelMessageHandler, OptionalField};
        use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
        use crate::util::errors::APIError;
        use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::util::test_utils;
+       use crate::util::config::ChannelConfig;
        use crate::chain::keysinterface::{EntropySource, KeysInterface};
 
        #[test]
@@ -8111,6 +8113,194 @@ mod tests {
                check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
                check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        }
+
+       fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
+               let expected_message = format!("Not connected to node: {}", expected_public_key);
+               check_api_misuse_error_message(expected_message, res_err)
+       }
+
+       fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
+               let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
+               check_api_misuse_error_message(expected_message, res_err)
+       }
+
+       fn check_api_misuse_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
+               match res_err {
+                       Err(APIError::APIMisuseError { err }) => {
+                               assert_eq!(err, expected_err_message);
+                       },
+                       Ok(_) => panic!("Unexpected Ok"),
+                       Err(_) => panic!("Unexpected Error"),
+               }
+       }
+
+       #[test]
+       fn test_api_calls_with_unkown_counterparty_node() {
+               // Tests that our API functions and message handlers that expect a `counterparty_node_id`
+               // as input behave as expected if the `counterparty_node_id` is an unknown peer in the
+               // `ChannelManager::per_peer_state` map.
+               let chanmon_cfg = create_chanmon_cfgs(2);
+               let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+               let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
+               let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+               // Boilerplate code to produce `open_channel` and `accept_channel` msgs more densely than
+               // creating dummy ones.
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+               let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel_msg);
+               let accept_channel_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+               // Dummy values
+               let channel_id = [4; 32];
+               let signature = Signature::from(unsafe { FFISignature::new() });
+               let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
+               let intercept_id = InterceptId([0; 32]);
+
+               // Dummy msgs
+               let funding_created_msg = msgs::FundingCreated {
+                       temporary_channel_id: open_channel_msg.temporary_channel_id,
+                       funding_txid: Txid::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(),
+                       funding_output_index: 0,
+                       signature: signature,
+               };
+
+               let funding_signed_msg = msgs::FundingSigned {
+                       channel_id: channel_id,
+                       signature: signature,
+               };
+
+               let channel_ready_msg = msgs::ChannelReady {
+                       channel_id: channel_id,
+                       next_per_commitment_point: unkown_public_key,
+                       short_channel_id_alias: None,
+               };
+
+               let announcement_signatures_msg = msgs::AnnouncementSignatures {
+                       channel_id: channel_id,
+                       short_channel_id: 0,
+                       node_signature: signature,
+                       bitcoin_signature: signature,
+               };
+
+               let channel_reestablish_msg = msgs::ChannelReestablish {
+                       channel_id: channel_id,
+                       next_local_commitment_number: 0,
+                       next_remote_commitment_number: 0,
+                       data_loss_protect: OptionalField::Absent,
+               };
+
+               let closing_signed_msg = msgs::ClosingSigned {
+                       channel_id: channel_id,
+                       fee_satoshis: 1000,
+                       signature: signature,
+                       fee_range: None,
+               };
+
+               let shutdown_msg = msgs::Shutdown {
+                       channel_id: channel_id,
+                       scriptpubkey: Script::new(),
+               };
+
+               let onion_routing_packet = msgs::OnionPacket {
+                       version: 255,
+                       public_key: Ok(unkown_public_key),
+                       hop_data: [1; 20*65],
+                       hmac: [2; 32]
+               };
+
+               let update_add_htlc_msg = msgs::UpdateAddHTLC {
+                       channel_id: channel_id,
+                       htlc_id: 0,
+                       amount_msat: 1000000,
+                       payment_hash: PaymentHash([1; 32]),
+                       cltv_expiry: 821716,
+                       onion_routing_packet
+               };
+
+               let commitment_signed_msg = msgs::CommitmentSigned {
+                       channel_id: channel_id,
+                       signature: signature,
+                       htlc_signatures: Vec::new(),
+               };
+
+               let update_fee_msg = msgs::UpdateFee {
+                       channel_id: channel_id,
+                       feerate_per_kw: 1000,
+               };
+
+               let malformed_update_msg = msgs::UpdateFailMalformedHTLC{
+                       channel_id: channel_id,
+                       htlc_id: 0,
+                       sha256_of_onion: [1; 32],
+                       failure_code: 0x8000,
+               };
+
+               let fulfill_update_msg = msgs::UpdateFulfillHTLC{
+                       channel_id: channel_id,
+                       htlc_id: 0,
+                       payment_preimage: PaymentPreimage([1; 32]),
+               };
+
+               let fail_update_msg = msgs::UpdateFailHTLC{
+                       channel_id: channel_id,
+                       htlc_id: 0,
+                       reason: msgs::OnionErrorPacket { data: Vec::new()},
+               };
+
+               let revoke_and_ack_msg = msgs::RevokeAndACK {
+                       channel_id: channel_id,
+                       per_commitment_secret: [1; 32],
+                       next_per_commitment_point: unkown_public_key,
+               };
+
+               // Test the API functions and message handlers.
+               check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key);
+
+               nodes[1].node.handle_open_channel(&unkown_public_key, channelmanager::provided_init_features(), &open_channel_msg);
+
+               nodes[0].node.handle_accept_channel(&unkown_public_key, channelmanager::provided_init_features(), &accept_channel_msg);
+
+               check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unkown_public_key, 42), unkown_public_key);
+
+               nodes[1].node.handle_funding_created(&unkown_public_key, &funding_created_msg);
+
+               nodes[0].node.handle_funding_signed(&unkown_public_key, &funding_signed_msg);
+
+               nodes[0].node.handle_channel_ready(&unkown_public_key, &channel_ready_msg);
+
+               nodes[1].node.handle_announcement_signatures(&unkown_public_key, &announcement_signatures_msg);
+
+               check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
+
+               check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
+
+               check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
+
+               check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
+
+               check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
+
+               nodes[0].node.handle_shutdown(&unkown_public_key, &channelmanager::provided_init_features(), &shutdown_msg);
+
+               nodes[1].node.handle_closing_signed(&unkown_public_key, &closing_signed_msg);
+
+               nodes[0].node.handle_channel_reestablish(&unkown_public_key, &channel_reestablish_msg);
+
+               nodes[1].node.handle_update_add_htlc(&unkown_public_key, &update_add_htlc_msg);
+
+               nodes[1].node.handle_commitment_signed(&unkown_public_key, &commitment_signed_msg);
+
+               nodes[1].node.handle_update_fail_malformed_htlc(&unkown_public_key, &malformed_update_msg);
+
+               nodes[1].node.handle_update_fail_htlc(&unkown_public_key, &fail_update_msg);
+
+               nodes[1].node.handle_update_fulfill_htlc(&unkown_public_key, &fulfill_update_msg);
+
+               nodes[1].node.handle_revoke_and_ack(&unkown_public_key, &revoke_and_ack_msg);
+
+               nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
+       }
 }
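Because each peer now owns its queue, events gathered across peers via `get_and_clear_pending_msg_events` come back in no particular cross-peer order, so tests asserting on multi-peer traffic are better off selecting events by destination than by index. A hedged sketch of such a test-side helper (hypothetical name, not part of this patch; only a handful of addressed variants are matched for brevity, and it assumes the `MessageSendEvent` and `PublicKey` imports already present in the test module):

// Hypothetical helper: pull the first queued event addressed to `node_id` out
// of an arbitrarily ordered batch, leaving the rest in place.
fn first_event_for_node(node_id: &PublicKey, events: &mut Vec<MessageSendEvent>) -> MessageSendEvent {
	let pos = events.iter().position(|e| match e {
		MessageSendEvent::SendOpenChannel { node_id: id, .. } => id == node_id,
		MessageSendEvent::SendAcceptChannel { node_id: id, .. } => id == node_id,
		MessageSendEvent::SendFundingSigned { node_id: id, .. } => id == node_id,
		MessageSendEvent::UpdateHTLCs { node_id: id, .. } => id == node_id,
		// A complete helper would cover every node_id-carrying variant.
		_ => false,
	}).expect("no pending event addressed to the given node");
	events.remove(pos)
}

For example, a test draining nodes[1]'s events could fetch the `accept_channel` destined for nodes[0] with first_event_for_node(&nodes[0].node.get_our_node_id(), &mut events), regardless of which peer's queue happened to be appended first.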
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]