git.bitcoin.ninja Git - rust-lightning / commitdiff
Move `short_to_chan_info` into standalone lock
author     Viktor Tigerström <11711198+ViktorTigerstrom@users.noreply.github.com>
Tue, 26 Jul 2022 20:59:24 +0000 (22:59 +0200)
committer  Viktor Tigerström <11711198+ViktorTigerstrom@users.noreply.github.com>
Fri, 4 Nov 2022 19:26:47 +0000 (20:26 +0100)
As the `channel_state` (`ChannelHolder`) struct will be removed, this
commit moves the `short_to_chan_info` map out of that lock and into a
separate, standalone lock.
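
As a minimal sketch of the access pattern this enables (simplified, hypothetical types; std's `RwLock` stands in for LDK's internal `FairRwLock`; this is not the actual `ChannelManager` code): SCID lookups can take a short read lock on the map and release it before the large `channel_state` mutex is acquired, mirroring the reordered call sites in the diff below (e.g. `internal_channel_update`).

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type ChannelId = [u8; 32];
type NodeId = [u8; 33]; // stand-in for secp256k1::PublicKey

struct ChannelHolderSketch {
    // Channel data elided; only the keying matters for this sketch.
    by_id: HashMap<ChannelId, ()>,
}

struct ManagerSketch {
    channel_state: Mutex<ChannelHolderSketch>,
    // After this commit: SCIDs (and outbound SCID aliases) -> (counterparty, channel_id),
    // guarded by its own lock on the manager rather than by `channel_state`.
    short_to_chan_info: RwLock<HashMap<u64, (NodeId, ChannelId)>>,
}

impl ManagerSketch {
    fn channel_id_for_scid(&self, scid: u64) -> Option<ChannelId> {
        // The read guard is a temporary and is dropped at the end of this statement,
        // before any other lock is taken.
        self.short_to_chan_info.read().unwrap().get(&scid).map(|(_, chan_id)| *chan_id)
    }

    fn has_channel_for_scid(&self, scid: u64) -> bool {
        // Resolve the SCID first, then take the `channel_state` mutex, as the
        // reordered paths in the diff do.
        let chan_id = match self.channel_id_for_scid(scid) {
            Some(id) => id,
            None => return false,
        };
        self.channel_state.lock().unwrap().by_id.contains_key(&chan_id)
    }
}

Write-side maintenance of the map (insertion on `channel_ready`, removal on channel close) is handled by the `send_channel_ready!` and `update_maps_on_chan_removal!` macros in the diff, which now take the write lock internally.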

lightning/src/ln/channelmanager.rs
lightning/src/ln/reorg_tests.rs

lightning/src/ln/channelmanager.rs
index 63e0bb5ea79a231f95e6f52590739a5c3fdd6420..570f04b628a7a5590d93de0b37edc43b5a5af32f 100644
@@ -66,7 +66,7 @@ use crate::prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use crate::io::Read;
-use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
@@ -395,12 +395,6 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
        pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-       /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
-       ///
-       /// Outbound SCID aliases are added here once the channel is available for normal use, with
-       /// SCIDs being added once the funding transaction is confirmed at the channel's required
-       /// confirmation depth.
-       pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
        /// Map from payment hash to the payment data and any HTLCs which are to us and can be
        /// failed/claimed by the user.
        ///
@@ -680,6 +674,8 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 //  |   |
 //  |   |__`id_to_peer`
 //  |   |
+//  |   |__`short_to_chan_info`
+//  |   |
 //  |   |__`per_peer_state`
 //  |       |
 //  |       |__`outbound_scid_aliases`
@@ -786,6 +782,22 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
 
+       /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
+       ///
+       /// Outbound SCID aliases are added here once the channel is available for normal use, with
+       /// SCIDs being added once the funding transaction is confirmed at the channel's required
+       /// confirmation depth.
+       ///
+       /// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency
+       /// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a
+       /// channel with the `channel_id` in our other maps.
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
+       #[cfg(test)]
+       pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+       #[cfg(not(test))]
+       short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+
        our_network_key: SecretKey,
        our_network_pubkey: PublicKey,
 
@@ -1295,9 +1307,11 @@ macro_rules! handle_error {
 }
 
 macro_rules! update_maps_on_chan_removal {
-       ($self: expr, $short_to_chan_info: expr, $channel: expr) => {
+       ($self: expr, $channel: expr) => {{
+               $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+               let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
                if let Some(short_id) = $channel.get_short_channel_id() {
-                       $short_to_chan_info.remove(&short_id);
+                       short_to_chan_info.remove(&short_id);
                } else {
                        // If the channel was never confirmed on-chain prior to its closure, remove the
                        // outbound SCID alias we used for it from the collision-prevention set. While we
@@ -1308,14 +1322,13 @@ macro_rules! update_maps_on_chan_removal {
                        let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
                        debug_assert!(alias_removed);
                }
-               $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
-               $short_to_chan_info.remove(&$channel.outbound_scid_alias());
-       }
+               short_to_chan_info.remove(&$channel.outbound_scid_alias());
+       }}
 }
 
 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
 macro_rules! convert_chan_err {
-       ($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => {
+       ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
                match $err {
                        ChannelError::Warn(msg) => {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
@@ -1325,7 +1338,7 @@ macro_rules! convert_chan_err {
                        },
                        ChannelError::Close(msg) => {
                                log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, $short_to_chan_info, $channel);
+                               update_maps_on_chan_removal!($self, $channel);
                                let shutdown_res = $channel.force_shutdown(true);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
@@ -1339,7 +1352,7 @@ macro_rules! break_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
+                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1354,7 +1367,7 @@ macro_rules! try_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
+                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1368,18 +1381,18 @@ macro_rules! remove_channel {
        ($self: expr, $channel_state: expr, $entry: expr) => {
                {
                        let channel = $entry.remove_entry().1;
-                       update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel);
+                       update_maps_on_chan_removal!($self, channel);
                        channel
                }
        }
 }
 
 macro_rules! handle_monitor_update_res {
-       ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+       ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
                        ChannelMonitorUpdateStatus::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
-                               update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
+                               update_maps_on_chan_removal!($self, $chan);
                                // TODO: $failed_fails is dropped here, which will cause other channels to hit the
                                // chain in a confused state! We need to move them into the ChannelMonitor which
                                // will be responsible for failing backwards once things confirm on-chain.
@@ -1421,48 +1434,49 @@ macro_rules! handle_monitor_update_res {
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-               let (res, drop) = handle_monitor_update_res!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+               let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
                res
        } };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
+       ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
                debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        } };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+       ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
+               handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
+       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
+               handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
+       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+               handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
+       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+               handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
        };
 }
 
 macro_rules! send_channel_ready {
-       ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
+       ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
                $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
                        node_id: $channel.get_counterparty_node_id(),
                        msg: $channel_ready_msg,
                });
                // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
                // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
-               let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
+               let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
+               let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
                assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
                        "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                if let Some(real_scid) = $channel.get_short_channel_id() {
-                       let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
+                       let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
                        assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
                                "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                }
-       }
+       }}
 }
 
 macro_rules! emit_channel_ready_event {
@@ -1516,7 +1530,7 @@ macro_rules! handle_chan_restoration_locked {
                                // Similar to the above, this implies that we're letting the channel_ready fly
                                // before it should be allowed to.
                                assert!(chanmon_update.is_none());
-                               send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg);
+                               send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
                        }
                        if let Some(msg) = $announcement_sigs {
                                $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@ -1549,7 +1563,7 @@ macro_rules! handle_chan_restoration_locked {
                                                if $raa.is_none() {
                                                        order = RAACommitmentOrder::CommitmentFirst;
                                                }
-                                               break handle_monitor_update_res!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
+                                               break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
                                        }
                                }
                        }
@@ -1643,7 +1657,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
-                               short_to_chan_info: HashMap::new(),
                                claimable_htlcs: HashMap::new(),
                                pending_msg_events: Vec::new(),
                        }),
@@ -1652,6 +1665,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        pending_outbound_payments: Mutex::new(HashMap::new()),
                        forward_htlcs: Mutex::new(HashMap::new()),
                        id_to_peer: Mutex::new(HashMap::new()),
+                       short_to_chan_info: FairRwLock::new(HashMap::new()),
 
                        our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
                        our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
@@ -1903,7 +1917,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        if let Some(monitor_update) = monitor_update {
                                                let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
                                                let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
                                                if is_permanent {
                                                        remove_channel!(self, channel_state, chan_entry);
                                                        break result;
@@ -2288,8 +2302,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
                                if let Some((err, code, chan_update)) = loop {
+                                       let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
                                        let mut channel_state = self.channel_state.lock().unwrap();
-                                       let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned();
                                        let forwarding_id_opt = match id_option {
                                                None => { // unknown_next_peer
                                                        // Note that this is likely a timing oracle for detecting whether an scid is a
@@ -2468,13 +2482,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let err: Result<(), _> = loop {
-                       let mut channel_lock = self.channel_state.lock().unwrap();
-
-                       let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) {
+                       let id = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
                                None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
                                Some((_cp_id, chan_id)) => chan_id.clone(),
                        };
 
+                       let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
                                match {
@@ -2499,7 +2512,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
                                                let chan_id = chan.get().channel_id();
                                                match (update_err,
-                                                       handle_monitor_update_res!(self, update_err, channel_state, chan,
+                                                       handle_monitor_update_res!(self, update_err, chan,
                                                                RAACommitmentOrder::CommitmentFirst, false, true))
                                                {
                                                        (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
@@ -3120,7 +3133,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                if short_chan_id != 0 {
-                                       let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) {
+                                       let forward_chan_id = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
                                                Some((_cp_id, chan_id)) => chan_id.clone(),
                                                None => {
                                                        for forward_info in pending_forwards.drain(..) {
@@ -3319,7 +3332,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                        match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                                ChannelMonitorUpdateStatus::Completed => {},
                                                                e => {
-                                                                       handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+                                                                       handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
                                                                        continue;
                                                                }
                                                        }
@@ -3563,7 +3576,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                self.process_background_events();
        }
 
-       fn update_channel_fee(&self, short_to_chan_info: &mut HashMap<u64, (PublicKey, [u8; 32])>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+       fn update_channel_fee(&self, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
                if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
@@ -3583,7 +3596,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
                        Ok(res) => Ok(res),
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
+                               let (drop, res) = convert_chan_err!(self, e, chan, chan_id);
                                if drop { retain_channel = false; }
                                Err(res)
                        }
@@ -3606,7 +3619,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                Ok(())
                                        },
                                        e => {
-                                               let (res, drop) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+                                               let (res, drop) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
                                                if drop { retain_channel = false; }
                                                res
                                        }
@@ -3634,9 +3647,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
-                               let short_to_chan_info = &mut channel_state.short_to_chan_info;
                                channel_state.by_id.retain(|chan_id, chan| {
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                                        if err.is_err() {
                                                handle_errors.push(err);
@@ -3713,10 +3725,9 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
-                               let short_to_chan_info = &mut channel_state.short_to_chan_info;
                                channel_state.by_id.retain(|chan_id, chan| {
                                        let counterparty_node_id = chan.get_counterparty_node_id();
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                                        if err.is_err() {
                                                handle_errors.push((err, counterparty_node_id));
@@ -3724,7 +3735,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        if !retain_channel { return false; }
 
                                        if let Err(e) = chan.timer_check_closing_negotiation_progress() {
-                                               let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
+                                               let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
                                                handle_errors.push((Err(err), chan.get_counterparty_node_id()));
                                                if needs_close { return false; }
                                        }
@@ -4142,10 +4153,19 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        for htlc in sources.iter() {
-                               if let None = channel_state.short_to_chan_info.get(&htlc.prev_hop.short_channel_id) {
+                               let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
+                                       Some((_cp_id, chan_id)) => chan_id.clone(),
+                                       None => {
+                                               valid_mpp = false;
+                                               break;
+                                       }
+                               };
+
+                               if let None = channel_state.by_id.get(&chan_id) {
                                        valid_mpp = false;
                                        break;
                                }
+
                                if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
                                        log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
                                        debug_assert!(false);
@@ -4229,7 +4249,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
                let channel_state = &mut **channel_state_lock;
-               let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) {
+               let chan_id = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
                        Some((_cp_id, chan_id)) => chan_id.clone(),
                        None => {
                                return ClaimFundsFromHop::PrevHopForceClosed
@@ -4248,7 +4268,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                        payment_preimage, e);
                                                                return ClaimFundsFromHop::MonitorUpdateFail(
                                                                        chan.get().get_counterparty_node_id(),
-                                                                       handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                                       handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
                                                                        Some(htlc_value_msat)
                                                                );
                                                        }
@@ -4283,7 +4303,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                },
                                        }
                                        let counterparty_node_id = chan.get().get_counterparty_node_id();
-                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id);
+                                       let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
                                        if drop {
                                                chan.remove_entry();
                                        }
@@ -4698,7 +4718,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        msg: funding_msg,
                                });
                                if let Some(msg) = channel_ready {
-                                       send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg);
+                                       send_channel_ready!(self, channel_state.pending_msg_events, chan, msg);
                                }
                                e.insert(chan);
                        }
@@ -4723,7 +4743,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
                                                ChannelMonitorUpdateStatus::Completed => {},
                                                e => {
-                                                       let mut res = handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+                                                       let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
                                                        if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                                                // We weren't able to watch the channel to begin with, so no updates should be made on
                                                                // it. Previously, full_stack_target found an (unreachable) panic when the
@@ -4736,7 +4756,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                },
                                        }
                                        if let Some(msg) = channel_ready {
-                                               send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg);
+                                               send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
                                        }
                                        funding_tx
                                },
@@ -4812,7 +4832,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                        if let Some(monitor_update) = monitor_update {
                                                let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
                                                let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
                                                if is_permanent {
                                                        remove_channel!(self, channel_state, chan_entry);
                                                        break result;
@@ -5005,7 +5025,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                Ok(res) => res
                                        };
                                let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
-                               if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
                                        return Err(e);
                                }
 
@@ -5094,7 +5114,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
                                        }
                                        if update_res != ChannelMonitorUpdateStatus::Completed {
-                                               if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan,
+                                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
                                                                RAACommitmentOrder::CommitmentFirst, false,
                                                                raa_updates.commitment_update.is_some(), false,
                                                                raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
@@ -5177,15 +5197,15 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
        /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err.
        fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
-               let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_state_lock;
-               let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) {
+               let chan_id = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
                        Some((_cp_id, chan_id)) => chan_id.clone(),
                        None => {
                                // It's not a local channel
                                return Ok(NotifyOption::SkipPersist)
                        }
                };
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
+               let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(chan_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
@@ -5351,7 +5371,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let by_id = &mut channel_state.by_id;
-                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
 
                        by_id.retain(|channel_id, chan| {
@@ -5374,7 +5393,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                                },
                                                                e => {
                                                                        has_monitor_update = true;
-                                                                       let (res, close_channel) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+                                                                       let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
                                                                        handle_errors.push((chan.get_counterparty_node_id(), res));
                                                                        if close_channel { return false; }
                                                                },
@@ -5383,7 +5402,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                                                true
                                        },
                                        Err(e) => {
-                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
+                                               let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
                                                // ChannelClosed event is generated by handle_error for us
                                                !close_channel
@@ -5414,7 +5433,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let by_id = &mut channel_state.by_id;
-                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
 
                        by_id.retain(|channel_id, chan| {
@@ -5439,13 +5457,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
                                                        log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                        self.tx_broadcaster.broadcast_transaction(&tx);
-                                                       update_maps_on_chan_removal!(self, short_to_chan_info, chan);
+                                                       update_maps_on_chan_removal!(self, chan);
                                                        false
                                                } else { true }
                                        },
                                        Err(e) => {
                                                has_update = true;
-                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
+                                               let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
                                                !close_channel
                                        }
@@ -5635,14 +5653,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
        ///
        /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
        pub fn get_phantom_scid(&self) -> u64 {
-               let mut channel_state = self.channel_state.lock().unwrap();
-               let best_block = self.best_block.read().unwrap();
+               let best_block_height = self.best_block.read().unwrap().height();
+               let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
-                       let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+                       let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
                        // Ensure the generated scid doesn't conflict with a real channel.
-                       match channel_state.short_to_chan_info.entry(scid_candidate) {
-                               hash_map::Entry::Occupied(_) => continue,
-                               hash_map::Entry::Vacant(_) => return scid_candidate
+                       match short_to_chan_info.get(&scid_candidate) {
+                               Some(_) => continue,
+                               None => return scid_candidate
                        }
                }
        }
@@ -5855,7 +5873,7 @@ where
 
        fn get_relevant_txids(&self) -> Vec<Txid> {
                let channel_state = self.channel_state.lock().unwrap();
-               let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len());
+               let mut res = Vec::with_capacity(channel_state.by_id.len());
                for chan in channel_state.by_id.values() {
                        if let Some(funding_txo) = chan.get_funding_txo() {
                                res.push(funding_txo.txid);
@@ -5898,7 +5916,6 @@ where
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                let res = f(channel);
@@ -5910,7 +5927,7 @@ where
                                                }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
                                        }
                                        if let Some(channel_ready) = channel_ready_opt {
-                                               send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
+                                               send_channel_ready!(self, pending_msg_events, channel, channel_ready);
                                                if channel.is_usable() {
                                                        log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
                                                        if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
@@ -5951,6 +5968,7 @@ where
                                                        // enforce option_scid_alias then), and if the funding tx is ever
                                                        // un-confirmed we force-close the channel, ensuring short_to_chan_info
                                                        // is always consistent.
+                                                       let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
                                                        let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
                                                        assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
                                                                "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
@@ -5958,7 +5976,7 @@ where
                                                }
                                        }
                                } else if let Err(reason) = res {
-                                       update_maps_on_chan_removal!(self, short_to_chan_info, channel);
+                                       update_maps_on_chan_removal!(self, channel);
                                        // It looks like our counterparty went on-chain or funding transaction was
                                        // reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
@@ -6154,14 +6172,13 @@ impl<M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
-                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
                                log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
                        channel_state.by_id.retain(|_, chan| {
                                if chan.get_counterparty_node_id() == *counterparty_node_id {
                                        chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
                                        if chan.is_shutdown() {
-                                               update_maps_on_chan_removal!(self, short_to_chan_info, chan);
+                                               update_maps_on_chan_removal!(self, chan);
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
                                                return false;
                                        } else {
@@ -7351,7 +7368,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
-                               short_to_chan_info,
                                claimable_htlcs,
                                pending_msg_events: Vec::new(),
                        }),
@@ -7362,6 +7378,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        forward_htlcs: Mutex::new(forward_htlcs),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        id_to_peer: Mutex::new(id_to_peer),
+                       short_to_chan_info: FairRwLock::new(short_to_chan_info),
                        fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
 
                        probing_cookie_secret: probing_cookie_secret.unwrap(),
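
Before the `reorg_tests.rs` hunks below, a second sketch (same simplified, hypothetical types as above; not the real macros) of the write-side maintenance that `send_channel_ready!` and `update_maps_on_chan_removal!` now perform against the standalone lock. The `id_to_peer` update and the `outbound_scid_aliases` collision-prevention set are omitted here.

use std::collections::HashMap;
use std::sync::RwLock;

type ChannelId = [u8; 32];
type NodeId = [u8; 33]; // stand-in for secp256k1::PublicKey

struct ScidMapSketch {
    short_to_chan_info: RwLock<HashMap<u64, (NodeId, ChannelId)>>,
}

impl ScidMapSketch {
    fn on_channel_ready(&self, alias: u64, real_scid: Option<u64>, entry: (NodeId, ChannelId)) {
        let mut map = self.short_to_chan_info.write().unwrap();
        // channel_ready may be re-sent on reconnect, so a collision is tolerated only
        // if the existing entry points at the same channel.
        let prev = map.insert(alias, entry);
        assert!(prev.is_none() || prev == Some(entry), "SCID alias collision");
        if let Some(scid) = real_scid {
            let prev = map.insert(scid, entry);
            assert!(prev.is_none() || prev == Some(entry), "SCID collision");
        }
    }

    fn on_channel_removed(&self, alias: u64, real_scid: Option<u64>) {
        // Both the confirmed SCID (if any) and the outbound alias are dropped under a
        // single write-lock acquisition.
        let mut map = self.short_to_chan_info.write().unwrap();
        if let Some(scid) = real_scid {
            map.remove(&scid);
        }
        map.remove(&alias);
    }
}

The test assertions below then read the map through this lock directly (`short_to_chan_info.read().unwrap().len()`) instead of through `channel_state`.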
lightning/src/ln/reorg_tests.rs
index a80a16593b3f62b4f71a31a3d0521d31eb530fa3..3481e8d6cd9f39b60e4b186d8cf403f63ebc6afd 100644
@@ -275,7 +275,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 
        let channel_state = nodes[0].node.channel_state.lock().unwrap();
        assert_eq!(channel_state.by_id.len(), 1);
-       assert_eq!(channel_state.short_to_chan_info.len(), 2);
+       assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 2);
        mem::drop(channel_state);
 
        if !reorg_after_reload {
@@ -295,7 +295,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
                        assert_eq!(channel_state.by_id.len(), 0);
-                       assert_eq!(channel_state.short_to_chan_info.len(), 0);
+                       assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
                }
        }
 
@@ -364,7 +364,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
                        assert_eq!(channel_state.by_id.len(), 0);
-                       assert_eq!(channel_state.short_to_chan_info.len(), 0);
+                       assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
                }
        }
        // With expect_channel_force_closed set the TestChainMonitor will enforce that the next update