Pass node features through to RouteHops

To make the counterparty's feature set available to routing, ChannelManager now tracks
the latest Init-context features it has seen from each connected peer in a new
per_peer_state map, exposes them via a counterparty_features field on ChannelDetails,
and (de)serializes them with the rest of the manager state. As a cleanup, the
MutChannelHolder/borrow_parts() indirection is dropped in favor of reborrowing the
MutexGuard directly (&mut *lock).

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 698d463e5f6dea2f0158f4030d1908132dcad9b5..3c13154c7234abcfc3532098f2306b963c9ff7bf 100644
@@ -274,23 +274,11 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
 }
-pub(super) struct MutChannelHolder<'a, ChanSigner: ChannelKeys + 'a> {
-       pub(super) by_id: &'a mut HashMap<[u8; 32], Channel<ChanSigner>>,
-       pub(super) short_to_id: &'a mut HashMap<u64, [u8; 32]>,
-       pub(super) forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
-       pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
-       pub(super) pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
-}
-impl<ChanSigner: ChannelKeys> ChannelHolder<ChanSigner> {
-       pub(super) fn borrow_parts(&mut self) -> MutChannelHolder<ChanSigner> {
-               MutChannelHolder {
-                       by_id: &mut self.by_id,
-                       short_to_id: &mut self.short_to_id,
-                       forward_htlcs: &mut self.forward_htlcs,
-                       claimable_htlcs: &mut self.claimable_htlcs,
-                       pending_msg_events: &mut self.pending_msg_events,
-               }
-       }
+
+/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
+/// the latest Init features we heard from the peer.
+struct PeerState {
+       latest_features: InitFeatures,
 }
 
 #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
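
The removal above replaces the hand-rolled MutChannelHolder/borrow_parts() split-borrow
helper with a plain reborrow of the MutexGuard's target, which the later hunks spell as
`let channel_state = &mut *lock;`. A minimal, self-contained sketch (stand-in types, not
rust-lightning code) of why that works: once the guard is dereferenced to a single
`&mut`, the borrow checker can split that borrow across disjoint fields, which is exactly
what the deleted helper provided by hand.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Holder {
    by_id: HashMap<[u8; 32], u64>,
    pending_msg_events: Vec<String>,
}

fn main() {
    let holder = Mutex::new(Holder { by_id: HashMap::new(), pending_msg_events: Vec::new() });

    let mut lock = holder.lock().unwrap();
    // Reborrowing the guard's target once gives a plain `&mut Holder`, and the
    // compiler can then track `by_id` and `pending_msg_events` as disjoint borrows.
    let state = &mut *lock;
    let by_id = &mut state.by_id;
    let events = &mut state.pending_msg_events;
    by_id.insert([0; 32], 1);
    events.push("msg".to_string());
}
```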
@@ -346,6 +334,14 @@ pub struct ChannelManager<ChanSigner: ChannelKeys> {
        channel_state: Mutex<ChannelHolder<ChanSigner>>,
        our_network_key: SecretKey,
 
+       /// The bulk of our storage will eventually be here (channels and message queues and the like).
+       /// If we are connected to a peer we always at least have an entry here, even if no channels
+       /// are currently open with that peer.
+       /// Because adding or removing an entry is rare, we usually take an outer read lock and then
+       /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
+       /// new channel.
+       per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+
        pending_events: Mutex<Vec<events::Event>>,
        /// Used when we have to take a BIG lock to make sure everything is self-consistent.
        /// Essentially just when we're serializing ourselves out.
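
A standalone sketch of the two-level locking scheme the new doc comment describes, with
simplified stand-in types (`[u8; 33]` for PublicKey, `Vec<u8>` for InitFeatures): the
outer RwLock is written only when a peer entry is added or removed, while routine reads
and per-peer updates happen under the read lock plus the inner Mutex.

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type PublicKey = [u8; 33];   // stand-in for secp256k1::PublicKey
type InitFeatures = Vec<u8>; // stand-in for the real feature-flags type

struct PeerState {
    latest_features: InitFeatures,
}

struct Manager {
    per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
}

impl Manager {
    // Rare path (peer connect): take the outer write lock to insert or update the entry.
    fn peer_connected(&self, peer: PublicKey, features: InitFeatures) {
        let mut peers = self.per_peer_state.write().unwrap();
        match peers.entry(peer) {
            Entry::Occupied(e) => e.get().lock().unwrap().latest_features = features,
            Entry::Vacant(e) => {
                e.insert(Mutex::new(PeerState { latest_features: features }));
            },
        }
    }

    // Common path: outer read lock, then the one peer's inner Mutex.
    fn latest_features(&self, peer: &PublicKey) -> Option<InitFeatures> {
        let peers = self.per_peer_state.read().unwrap();
        peers.get(peer).map(|p| p.lock().unwrap().latest_features.clone())
    }
}

fn main() {
    let mgr = Manager { per_peer_state: RwLock::new(HashMap::new()) };
    mgr.peer_connected([2; 33], vec![0x01]);
    assert_eq!(mgr.latest_features(&[2; 33]), Some(vec![0x01]));
}
```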
@@ -408,6 +404,10 @@ pub struct ChannelDetails {
        pub short_channel_id: Option<u64>,
        /// The node_id of our counterparty
        pub remote_network_id: PublicKey,
+       /// The Features the channel counterparty provided upon last connection.
+       /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
+       /// many routing-relevant features are present in the init context.
+       pub counterparty_features: InitFeatures,
        /// The value, in satoshis, of this channel as appears in the funding output
        pub channel_value_satoshis: u64,
        /// The user_id passed in to create_channel, or 0 if the channel was inbound.
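
A hedged illustration of how a caller might consume the new counterparty_features field
when selecting first hops for a route. The types are stand-ins and
supports_example_feature() is a hypothetical accessor invented for this sketch; the real
InitFeatures type exposes its own per-feature query methods.

```rust
struct InitFeatures(Vec<u8>);

impl InitFeatures {
    // Hypothetical helper for the sketch only.
    fn supports_example_feature(&self) -> bool {
        self.0.first().map_or(false, |b| b & 1 != 0)
    }
}

struct ChannelDetails {
    short_channel_id: Option<u64>,
    counterparty_features: InitFeatures,
    is_live: bool,
}

// Keep only live first-hop channels whose counterparty advertised what we need.
fn first_hop_candidates(channels: &[ChannelDetails]) -> Vec<u64> {
    channels
        .iter()
        .filter(|c| c.is_live && c.counterparty_features.supports_example_feature())
        .filter_map(|c| c.short_channel_id)
        .collect()
}

fn main() {
    let chans = vec![ChannelDetails {
        short_channel_id: Some(42),
        counterparty_features: InitFeatures(vec![0x01]),
        is_live: true,
    }];
    assert_eq!(first_hop_candidates(&chans), vec![42]);
}
```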
@@ -628,6 +628,8 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                        }),
                        our_network_key: keys_manager.get_node_secret(),
 
+                       per_peer_state: RwLock::new(HashMap::new()),
+
                        pending_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
 
@@ -681,50 +683,70 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        /// Gets the list of open channels, in random order. See ChannelDetail field documentation for
        /// more information.
        pub fn list_channels(&self) -> Vec<ChannelDetails> {
-               let channel_state = self.channel_state.lock().unwrap();
-               let mut res = Vec::with_capacity(channel_state.by_id.len());
-               for (channel_id, channel) in channel_state.by_id.iter() {
-                       let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
-                       res.push(ChannelDetails {
-                               channel_id: (*channel_id).clone(),
-                               short_channel_id: channel.get_short_channel_id(),
-                               remote_network_id: channel.get_their_node_id(),
-                               channel_value_satoshis: channel.get_value_satoshis(),
-                               inbound_capacity_msat,
-                               outbound_capacity_msat,
-                               user_id: channel.get_user_id(),
-                               is_live: channel.is_live(),
-                       });
-               }
-               res
-       }
-
-       /// Gets the list of usable channels, in random order. Useful as an argument to
-       /// Router::get_route to ensure non-announced channels are used.
-       ///
-       /// These are guaranteed to have their is_live value set to true, see the documentation for
-       /// ChannelDetails::is_live for more info on exactly what the criteria are.
-       pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
-               let channel_state = self.channel_state.lock().unwrap();
-               let mut res = Vec::with_capacity(channel_state.by_id.len());
-               for (channel_id, channel) in channel_state.by_id.iter() {
-                       // Note we use is_live here instead of usable which leads to somewhat confused
-                       // internal/external nomenclature, but that's ok cause that's probably what the user
-                       // really wanted anyway.
-                       if channel.is_live() {
+               let mut res = Vec::new();
+               {
+                       let channel_state = self.channel_state.lock().unwrap();
+                       res.reserve(channel_state.by_id.len());
+                       for (channel_id, channel) in channel_state.by_id.iter() {
                                let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
                                res.push(ChannelDetails {
                                        channel_id: (*channel_id).clone(),
                                        short_channel_id: channel.get_short_channel_id(),
                                        remote_network_id: channel.get_their_node_id(),
+                                       counterparty_features: InitFeatures::empty(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
                                        inbound_capacity_msat,
                                        outbound_capacity_msat,
                                        user_id: channel.get_user_id(),
-                                       is_live: true,
+                                       is_live: channel.is_live(),
                                });
                        }
                }
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               for chan in res.iter_mut() {
+                       if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
+                               chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
+                       }
+               }
+               res
+       }
+
+       /// Gets the list of usable channels, in random order. Useful as an argument to
+       /// Router::get_route to ensure non-announced channels are used.
+       ///
+       /// These are guaranteed to have their is_live value set to true, see the documentation for
+       /// ChannelDetails::is_live for more info on exactly what the criteria are.
+       pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
+               let mut res = Vec::new();
+               {
+                       let channel_state = self.channel_state.lock().unwrap();
+                       res.reserve(channel_state.by_id.len());
+                       for (channel_id, channel) in channel_state.by_id.iter() {
+                               // Note we use is_live here instead of usable which leads to somewhat confused
+                               // internal/external nomenclature, but that's ok cause that's probably what the user
+                               // really wanted anyway.
+                               if channel.is_live() {
+                                       let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+                                       res.push(ChannelDetails {
+                                               channel_id: (*channel_id).clone(),
+                                               short_channel_id: channel.get_short_channel_id(),
+                                               remote_network_id: channel.get_their_node_id(),
+                                               counterparty_features: InitFeatures::empty(),
+                                               channel_value_satoshis: channel.get_value_satoshis(),
+                                               inbound_capacity_msat,
+                                               outbound_capacity_msat,
+                                               user_id: channel.get_user_id(),
+                                               is_live: true,
+                                       });
+                               }
+                       }
+               }
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               for chan in res.iter_mut() {
+                       if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
+                               chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
+                       }
+               }
                res
        }
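
Both listing functions now follow the same two-pass shape: build the ChannelDetails
vector with counterparty_features set to InitFeatures::empty() while holding only the
channel_state lock, drop that lock, then fill the features in under the per_peer_state
read lock. A condensed sketch of the pattern with stand-in types; holding one lock at a
time avoids any ordering question between the two maps.

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// chan id -> counterparty key, and counterparty key -> features (stand-ins).
fn list_with_features(
    channels: &Mutex<HashMap<u64, [u8; 33]>>,
    peers: &RwLock<HashMap<[u8; 33], Mutex<Vec<u8>>>>,
) -> Vec<(u64, [u8; 33], Vec<u8>)> {
    let mut res = Vec::new();
    {
        let chans = channels.lock().unwrap();
        res.reserve(chans.len());
        for (id, peer) in chans.iter() {
            // Placeholder, like counterparty_features: InitFeatures::empty() above.
            res.push((*id, *peer, Vec::new()));
        }
    } // channel lock released before we touch per-peer state
    let peers = peers.read().unwrap();
    for entry in res.iter_mut() {
        if let Some(feats) = peers.get(&entry.1) {
            entry.2 = feats.lock().unwrap().clone();
        }
    }
    res
}
```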
 
@@ -738,7 +760,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
                let (mut failed_htlcs, chan_option) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_state_lock.borrow_parts();
+                       let channel_state = &mut *channel_state_lock;
                        match channel_state.by_id.entry(channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
                                        let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
@@ -795,7 +817,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_state_lock.borrow_parts();
+                       let channel_state = &mut *channel_state_lock;
                        if let Some(chan) = channel_state.by_id.remove(channel_id) {
                                if let Some(short_id) = chan.get_short_channel_id() {
                                        channel_state.short_to_id.remove(&short_id);
@@ -1127,7 +1149,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                                Some(id) => id.clone(),
                        };
 
-                       let channel_state = channel_lock.borrow_parts();
+                       let channel_state = &mut *channel_lock;
                        if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
                                match {
                                        if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
@@ -1275,7 +1297,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                let mut handle_errors = Vec::new();
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_state_lock.borrow_parts();
+                       let channel_state = &mut *channel_state_lock;
 
                        for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
                                if short_chan_id != 0 {
@@ -1473,8 +1495,8 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        pub fn timer_chan_freshness_every_min(&self) {
                let _ = self.total_consistency_lock.read().unwrap();
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
-               for (_, chan) in channel_state.by_id {
+               let channel_state = &mut *channel_state_lock;
+               for (_, chan) in channel_state.by_id.iter_mut() {
                        if chan.is_disabled_staged() && !chan.is_live() {
                                if let Ok(update) = self.get_channel_update(&chan) {
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
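
One behavioural detail in this hunk: with borrow_parts() gone, `channel_state.by_id` is
an owned field behind `&mut ChannelHolder`, so the loop must call iter_mut() explicitly
rather than iterating the map directly. A small stand-in sketch of the resulting shape,
showing that pushing onto the sibling pending_msg_events field inside the loop remains a
disjoint, legal borrow.

```rust
use std::collections::HashMap;

struct Holder {
    by_id: HashMap<u64, bool>, // stand-in: chan id -> is_disabled_staged
    pending_msg_events: Vec<u64>,
}

fn freshness_tick(holder: &mut Holder) {
    // iter_mut() borrows only `by_id`; the sibling field stays available.
    for (id, disabled_staged) in holder.by_id.iter_mut() {
        if *disabled_staged {
            holder.pending_msg_events.push(*id);
            *disabled_staged = false;
        }
    }
}
```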
@@ -1657,7 +1679,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                                },
                                HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
                                        //TODO: Delay the claimed_funds relaying just like we do outbound relay!
-                                       let channel_state = channel_state_lock.borrow_parts();
+                                       let channel_state = &mut *channel_state_lock;
 
                                        let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
                                                Some(chan_id) => chan_id.clone(),
@@ -1729,9 +1751,9 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_lock.borrow_parts();
-                       let short_to_id = channel_state.short_to_id;
-                       let pending_msg_events = channel_state.pending_msg_events;
+                       let channel_state = &mut *channel_lock;
+                       let short_to_id = &mut channel_state.short_to_id;
+                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                if channel.is_awaiting_monitor_update() {
                                        let chan_monitor = channel.channel_monitor().clone();
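
This hunk (like the block_connected, block_disconnected, and peer_disconnected hunks
below) keeps explicit `&mut` bindings to short_to_id and pending_msg_events before
calling by_id.retain(). A minimal sketch of why: the retain closure cannot name
channel_state again while by_id is borrowed as the receiver, but it can capture separate
borrows of the sibling fields taken beforehand.

```rust
use std::collections::HashMap;

struct Holder {
    by_id: HashMap<u64, bool>, // stand-in: chan id -> keep?
    short_to_id: HashMap<u64, u64>,
    pending_msg_events: Vec<u64>,
}

fn prune(holder: &mut Holder) {
    // Take the disjoint field borrows up front, before `by_id` is borrowed by retain().
    let short_to_id = &mut holder.short_to_id;
    let pending_msg_events = &mut holder.pending_msg_events;
    holder.by_id.retain(|id, keep| {
        if !*keep {
            short_to_id.remove(id);
            pending_msg_events.push(*id);
            return false;
        }
        true
    });
}
```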
@@ -1836,7 +1858,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
                        .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
+               let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(channel.channel_id()) {
                        hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())),
                        hash_map::Entry::Vacant(entry) => {
@@ -1853,7 +1875,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        fn internal_accept_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
                let (value, output_script, user_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_lock.borrow_parts();
+                       let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
@@ -1878,7 +1900,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
                let ((funding_msg, monitor_update), mut chan) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_lock.borrow_parts();
+                       let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
@@ -1910,7 +1932,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                        }
                }
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
+               let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(funding_msg.channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
@@ -1929,7 +1951,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
                let (funding_txo, user_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_lock.borrow_parts();
+                       let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
@@ -1954,7 +1976,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
        fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
+               let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
@@ -1985,7 +2007,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
                let (mut dropped_htlcs, chan_option) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_state_lock.borrow_parts();
+                       let channel_state = &mut *channel_state_lock;
 
                        match channel_state.by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
@@ -2032,7 +2054,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
                let (tx, chan_option) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_state_lock.borrow_parts();
+                       let channel_state = &mut *channel_state_lock;
                        match channel_state.by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
                                        if chan_entry.get().get_their_node_id() != *their_node_id {
@@ -2086,7 +2108,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                //but we should prevent it anyway.
 
                let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
-               let channel_state = channel_state_lock.borrow_parts();
+               let channel_state = &mut *channel_state_lock;
 
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
@@ -2135,7 +2157,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let mut channel_lock = self.channel_state.lock().unwrap();
                let htlc_source = {
-                       let channel_state = channel_lock.borrow_parts();
+                       let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
@@ -2152,7 +2174,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
        fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
                let mut channel_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_lock.borrow_parts();
+               let channel_state = &mut *channel_lock;
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
@@ -2167,7 +2189,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
        fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
                let mut channel_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_lock.borrow_parts();
+               let channel_state = &mut *channel_lock;
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
@@ -2185,7 +2207,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
        fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
+               let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
@@ -2261,7 +2283,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
        fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
                let (pending_forwards, mut pending_failures, short_channel_id) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_state_lock.borrow_parts();
+                       let channel_state = &mut *channel_state_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
@@ -2305,7 +2327,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
        fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
                let mut channel_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_lock.borrow_parts();
+               let channel_state = &mut *channel_lock;
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
@@ -2320,7 +2342,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
        fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
+               let channel_state = &mut *channel_state_lock;
 
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
@@ -2362,7 +2384,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 
        fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
+               let channel_state = &mut *channel_state_lock;
 
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
@@ -2440,7 +2462,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let their_node_id;
                let err: Result<(), _> = loop {
-                       let channel_state = channel_state_lock.borrow_parts();
+                       let channel_state = &mut *channel_state_lock;
 
                        match channel_state.by_id.entry(channel_id) {
                                hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
@@ -2543,9 +2565,9 @@ impl<ChanSigner: ChannelKeys> ChainListener for ChannelManager<ChanSigner> {
                let mut failed_channels = Vec::new();
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_lock.borrow_parts();
-                       let short_to_id = channel_state.short_to_id;
-                       let pending_msg_events = channel_state.pending_msg_events;
+                       let channel_state = &mut *channel_lock;
+                       let short_to_id = &mut channel_state.short_to_id;
+                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
                                if let Ok(Some(funding_locked)) = chan_res {
@@ -2621,9 +2643,9 @@ impl<ChanSigner: ChannelKeys> ChainListener for ChannelManager<ChanSigner> {
                let mut failed_channels = Vec::new();
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_lock.borrow_parts();
-                       let short_to_id = channel_state.short_to_id;
-                       let pending_msg_events = channel_state.pending_msg_events;
+                       let channel_state = &mut *channel_lock;
+                       let short_to_id = &mut channel_state.short_to_id;
+                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_,  v| {
                                if v.block_disconnected(header) {
                                        if let Some(short_id) = v.get_short_channel_id() {
@@ -2798,11 +2820,12 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigne
                let _ = self.total_consistency_lock.read().unwrap();
                let mut failed_channels = Vec::new();
                let mut failed_payments = Vec::new();
+               let mut no_channels_remain = true;
                {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = channel_state_lock.borrow_parts();
-                       let short_to_id = channel_state.short_to_id;
-                       let pending_msg_events = channel_state.pending_msg_events;
+                       let channel_state = &mut *channel_state_lock;
+                       let short_to_id = &mut channel_state.short_to_id;
+                       let pending_msg_events = &mut channel_state.pending_msg_events;
                        if no_connection_possible {
                                log_debug!(self, "Failing all channels with {} due to no_connection_possible", log_pubkey!(their_node_id));
                                channel_state.by_id.retain(|_, chan| {
@@ -2836,6 +2859,8 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigne
                                                                short_to_id.remove(&short_id);
                                                        }
                                                        return false;
+                                               } else {
+                                                       no_channels_remain = false;
                                                }
                                        }
                                        true
@@ -2861,6 +2886,10 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigne
                                }
                        });
                }
+               if no_channels_remain {
+                       self.per_peer_state.write().unwrap().remove(their_node_id);
+               }
+
                for failure in failed_channels.drain(..) {
                        self.finish_force_close_channel(failure);
                }
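
A condensed, stand-in-typed sketch of the disconnect-side bookkeeping added in the last
few hunks: while pruning channels for the departing peer we record whether any survive,
and the PeerState entry is dropped only when none do, so latest_features stays available
for peers we still have open channels with.

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

fn peer_disconnected(
    channels: &Mutex<HashMap<u64, [u8; 33]>>,          // chan id -> counterparty key
    peers: &RwLock<HashMap<[u8; 33], Mutex<Vec<u8>>>>, // counterparty key -> features
    their_node_id: &[u8; 33],
    no_connection_possible: bool,
) {
    let mut no_channels_remain = true;
    {
        let mut chans = channels.lock().unwrap();
        chans.retain(|_, counterparty| {
            if *counterparty == *their_node_id {
                if no_connection_possible {
                    // Mirrors the force-close path: the channel goes away entirely.
                    return false;
                }
                no_channels_remain = false;
            }
            true
        });
    }
    // Only forget the peer's latest_features once no channels reference it.
    if no_channels_remain {
        peers.write().unwrap().remove(their_node_id);
    }
}
```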
@@ -2871,13 +2900,28 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigne
                }
        }
 
-       fn peer_connected(&self, their_node_id: &PublicKey) {
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
                log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
 
                let _ = self.total_consistency_lock.read().unwrap();
+
+               {
+                       let mut peer_state_lock = self.per_peer_state.write().unwrap();
+                       match peer_state_lock.entry(their_node_id.clone()) {
+                               hash_map::Entry::Vacant(e) => {
+                                       e.insert(Mutex::new(PeerState {
+                                               latest_features: init_msg.features.clone(),
+                                       }));
+                               },
+                               hash_map::Entry::Occupied(e) => {
+                                       e.get().lock().unwrap().latest_features = init_msg.features.clone();
+                               },
+                       }
+               }
+
                let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = channel_state_lock.borrow_parts();
-               let pending_msg_events = channel_state.pending_msg_events;
+               let channel_state = &mut *channel_state_lock;
+               let pending_msg_events = &mut channel_state.pending_msg_events;
                channel_state.by_id.retain(|_, chan| {
                        if chan.get_their_node_id() == *their_node_id {
                                if !chan.have_received_message() {
@@ -3141,6 +3185,14 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for ChannelManager<ChanSigne
                        }
                }
 
+               let per_peer_state = self.per_peer_state.write().unwrap();
+               (per_peer_state.len() as u64).write(writer)?;
+               for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
+                       peer_pubkey.write(writer)?;
+                       let peer_state = peer_state_mutex.lock().unwrap();
+                       peer_state.latest_features.write(writer)?;
+               }
+
                Ok(())
        }
 }
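
The write side above and the read side in the next hunk agree on a simple layout: a u64
peer count followed by (pubkey, latest_features) pairs in iteration order. Below is a
std-only round-trip sketch of that shape; the byte-level framing here (big-endian
lengths, a u16 prefix on the features blob) is an assumption of the sketch, not the real
Writeable/Readable encoding.

```rust
use std::collections::HashMap;
use std::io::{Cursor, Read, Write};

fn write_peers(w: &mut impl Write, peers: &HashMap<[u8; 33], Vec<u8>>) -> std::io::Result<()> {
    w.write_all(&(peers.len() as u64).to_be_bytes())?;
    for (pubkey, features) in peers.iter() {
        w.write_all(pubkey)?;
        w.write_all(&(features.len() as u16).to_be_bytes())?;
        w.write_all(features)?;
    }
    Ok(())
}

fn read_peers(r: &mut impl Read) -> std::io::Result<HashMap<[u8; 33], Vec<u8>>> {
    let mut len = [0u8; 8];
    r.read_exact(&mut len)?;
    let count = u64::from_be_bytes(len);
    // Cap the pre-allocation, mirroring the cmp::min(peer_count, 128) guard in the
    // deserialization hunk, so a corrupt count cannot force a huge allocation.
    let mut peers = HashMap::with_capacity(std::cmp::min(count as usize, 128));
    for _ in 0..count {
        let mut pubkey = [0u8; 33];
        r.read_exact(&mut pubkey)?;
        let mut flen = [0u8; 2];
        r.read_exact(&mut flen)?;
        let mut features = vec![0u8; u16::from_be_bytes(flen) as usize];
        r.read_exact(&mut features)?;
        peers.insert(pubkey, features);
    }
    Ok(peers)
}

fn main() -> std::io::Result<()> {
    let mut peers = HashMap::new();
    peers.insert([2u8; 33], vec![0x22]);
    let mut buf = Vec::new();
    write_peers(&mut buf, &peers)?;
    assert_eq!(read_peers(&mut Cursor::new(buf))?, peers);
    Ok(())
}
```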
@@ -3274,6 +3326,16 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArg
                        claimable_htlcs.insert(payment_hash, previous_hops);
                }
 
+               let peer_count: u64 = Readable::read(reader)?;
+               let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, 128));
+               for _ in 0..peer_count {
+                       let peer_pubkey = Readable::read(reader)?;
+                       let peer_state = PeerState {
+                               latest_features: Readable::read(reader)?,
+                       };
+                       per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+               }
+
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
@@ -3293,6 +3355,8 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArg
                        }),
                        our_network_key: args.keys_manager.get_node_secret(),
 
+                       per_peer_state: RwLock::new(per_peer_state),
+
                        pending_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
                        keys_manager: args.keys_manager,