Consider all channel maps in `update_partial_channel_config`
[rust-lightning] lightning/src/ln/channelmanager.rs
index 111d79ae3c2df5a39a93120bec2c22fd75b2515b..75ca8b1376cfa67225461e975ce270ec8a963d4e 100644
@@ -2246,6 +2246,7 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
+                               // Only `Channels` in the channel_by_id map can be considered funded.
                                for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
                                                peer_state.latest_features.clone(), &self.fee_estimator);
@@ -2314,11 +2315,15 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        let features = &peer_state.latest_features;
+                       let chan_context_to_details = |context| {
+                               ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
+                       };
                        return peer_state.channel_by_id
                                .iter()
-                               .map(|(_, channel)|
-                                       ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                       features.clone(), &self.fee_estimator))
+                               .map(|(_, channel)| &channel.context)
+                               .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+                               .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+                               .map(chan_context_to_details)
                                .collect();
                }
                vec![]
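
(Aside, not part of the patch: the chained-iterator shape used in the hunk above is easier to see in isolation. The sketch below uses plain `HashMap`s and stand-in `Context`/`Details` types, not LDK's, to show how chaining the three per-peer maps lets a single closure build the details for every channel, funded or not.)

    use std::collections::HashMap;

    // Stand-in types, NOT LDK's ChannelContext/ChannelDetails.
    struct Context { id: u32 }
    struct Details { id: u32 }

    fn main() {
        let funded: HashMap<u32, Context> = HashMap::from([(1, Context { id: 1 })]);
        let outbound: HashMap<u32, Context> = HashMap::from([(2, Context { id: 2 })]);
        let inbound: HashMap<u32, Context> = HashMap::from([(3, Context { id: 3 })]);

        // One closure builds the details; chaining the three maps into a single
        // iterator means the mapping logic is written exactly once.
        let to_details = |ctx: &Context| Details { id: ctx.id };
        let all: Vec<Details> = funded.values()
            .chain(outbound.values())
            .chain(inbound.values())
            .map(to_details)
            .collect();
        assert_eq!(all.len(), 3);
        assert!(all.iter().any(|d| d.id == 3));
    }
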
@@ -2375,48 +2380,58 @@ where
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
                let result: Result<(), _> = loop {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       {
+                               let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                               .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+                               let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+                                       .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let funding_txo_opt = chan_entry.get().context.get_funding_txo();
-                                       let their_features = &peer_state.latest_features;
-                                       let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
-                                               .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
-                                       failed_htlcs = htlcs;
-
-                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                       // here as we don't need the monitor update to complete until we send a
-                                       // `shutdown_signed`, which we'll delay if we're pending a monitor update.
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: *counterparty_node_id,
-                                               msg: shutdown_msg,
-                                       });
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
 
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update_opt.take() {
-                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                                       peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
-                                       }
+                               match peer_state.channel_by_id.entry(channel_id.clone()) {
+                                       hash_map::Entry::Occupied(mut chan_entry) => {
+                                               let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+                                               let their_features = &peer_state.latest_features;
+                                               let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+                                                       .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+                                               failed_htlcs = htlcs;
 
-                                       if chan_entry.get().is_shutdown() {
-                                               let channel = remove_channel!(self, chan_entry);
-                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
-                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: channel_update
-                                                       });
+                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                               // here as we don't need the monitor update to complete until we send a
+                                               // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                       node_id: *counterparty_node_id,
+                                                       msg: shutdown_msg,
+                                               });
+
+                                               // Update the monitor with the shutdown script if necessary.
+                                               if let Some(monitor_update) = monitor_update_opt.take() {
+                                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
                                                }
-                                               self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
-                                       }
-                                       break Ok(());
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
+
+                                               if chan_entry.get().is_shutdown() {
+                                                       let channel = remove_channel!(self, chan_entry);
+                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: channel_update
+                                                               });
+                                                       }
+                                                       self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+                                               }
+                                               break Ok(());
+                                       },
+                                       hash_map::Entry::Vacant(_) => (),
+                               }
                        }
+                       // If we reach this point, it means that the channel_id either refers to an unfunded channel or
+                       // it does not exist for this peer. Either way, we can attempt to force-close it.
+                       //
+                       // If the channel does not exist for this peer, an appropriate error will be returned.
+                       return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+                       // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
+                       // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
                };
 
                for htlc_source in failed_htlcs.drain(..) {
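
(Aside, not part of the patch: the restructuring above scopes the funded-map lookup so the per-peer state borrow ends before control falls through to `force_close_channel_with_peer`. A minimal, self-contained sketch of that control-flow shape, with plain `HashMap`s standing in for the funded and unfunded channel maps:)

    use std::collections::HashMap;

    fn close(
        funded: &mut HashMap<u32, String>,
        unfunded: &mut HashMap<u32, String>,
        id: u32,
    ) -> Result<(), String> {
        {
            // Scoped lookup, mirroring the new inner block: a funded channel takes the
            // cooperative shutdown path and returns from inside the scope.
            if let Some(chan) = funded.remove(&id) {
                println!("cooperatively closing {chan}");
                return Ok(());
            }
            // Vacant entry: fall out of the scope instead of erroring immediately.
        }
        // Fall-through, as in the patch: an unfunded channel is simply dropped
        // (force-closed), and an unknown id yields the "not found" error.
        match unfunded.remove(&id) {
            Some(chan) => { println!("force-closing unfunded {chan}"); Ok(()) }
            None => Err(format!("channel {id} not found")),
        }
    }

    fn main() {
        let mut funded = HashMap::from([(1, "chan-1".to_string())]);
        let mut unfunded = HashMap::from([(2, "chan-2".to_string())]);
        assert!(close(&mut funded, &mut unfunded, 2).is_ok());  // unfunded: force-closed
        assert!(close(&mut funded, &mut unfunded, 9).is_err()); // unknown id: error
    }
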
@@ -3523,27 +3538,48 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                for channel_id in channel_ids {
-                       if !peer_state.channel_by_id.contains_key(channel_id) {
+                       if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
                                        err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
                                });
-                       }
+                       };
                }
                for channel_id in channel_ids {
-                       let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
-                       let mut config = channel.context.config();
-                       config.apply(config_update);
-                       if !channel.context.update_config(&config) {
+                       if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
+                               let mut config = channel.context.config();
+                               config.apply(config_update);
+                               if !channel.context.update_config(&config) {
+                                       continue;
+                               }
+                               if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                               } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: channel.context.get_counterparty_node_id(),
+                                               msg,
+                                       });
+                               }
                                continue;
                        }
-                       if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
-                       } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                       node_id: channel.context.get_counterparty_node_id(),
-                                       msg,
+
+                       let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
+                               &mut channel.context
+                       } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
+                               &mut channel.context
+                       } else {
+                               // This should not be reachable, as we've already checked that each channel_id exists in the loop above.
+                               debug_assert!(false);
+                               return Err(APIError::ChannelUnavailable {
+                                       err: format!(
+                                               "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
+                                               log_bytes!(*channel_id), counterparty_node_id),
                                });
-                       }
+                       };
+                       let mut config = context.config();
+                       config.apply(config_update);
+                       // We update the config, but we MUST NOT broadcast a `channel_update` here, as these
+                       // pending inbound/outbound channels have not yet reached `channel_ready`.
+                       context.update_config(&config);
                }
                Ok(())
        }
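
(Aside: `peer_state.has_channel` used above is a helper introduced elsewhere in this patch, so its body is not visible in this hunk. A hedged sketch of what it presumably checks, with the surrounding `impl` and generic bounds elided:)

    // Hedged sketch only; the actual helper lives in another hunk and may differ.
    fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
        self.channel_by_id.contains_key(channel_id)
            || self.outbound_v1_channel_by_id.contains_key(channel_id)
            || self.inbound_v1_channel_by_id.contains_key(channel_id)
    }
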
@@ -7226,37 +7262,20 @@ where
                log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
                let per_peer_state = self.per_peer_state.read().unwrap();
-               for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        let pending_msg_events = &mut peer_state.pending_msg_events;
-                       peer_state.channel_by_id.retain(|_, chan| {
-                               let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
-                                       if !chan.context.have_received_message() {
-                                               // If we created this (outbound) channel while we were disconnected from the
-                                               // peer we probably failed to send the open_channel message, which is now
-                                               // lost. We can't have had anything pending related to this channel, so we just
-                                               // drop it.
-                                               false
-                                       } else {
-                                               pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                                       node_id: chan.context.get_counterparty_node_id(),
-                                                       msg: chan.get_channel_reestablish(&self.logger),
-                                               });
-                                               true
-                                       }
-                               } else { true };
-                               if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
-                                       if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
-                                               if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
-                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
-                                                               node_id: *counterparty_node_id,
-                                                               msg, update_msg,
-                                                       });
-                                               }
-                                       }
-                               }
-                               retain
+
+                       // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+                       // (so won't be recovered after a crash), we don't need to bother closing unfunded channels and
+                       // clearing their maps here. Instead we can just queue channel_reestablish messages for
+                       // channels in the channel_by_id map.
+                       peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+                               pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                       node_id: chan.context.get_counterparty_node_id(),
+                                       msg: chan.get_channel_reestablish(&self.logger),
+                               });
                        });
                }
                //TODO: Also re-broadcast announcement_signatures
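
(Aside: the comment above relies on behaviour from elsewhere in this changeset, namely that the unfunded per-peer maps are emptied on peer disconnect. That cleanup is not shown in this hunk; a hedged sketch of the shape it presumably takes, reusing `issue_channel_close_events` and `ClosureReason::DisconnectedPeer`, which do appear elsewhere in this file:)

    // Hedged sketch (assumption, not shown in this diff): drain the pending V1 maps on
    // disconnect, emitting a close event per entry, so the reestablish path above only
    // ever has funded channels to consider.
    peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
        self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
        false
    });
    peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
        self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
        false
    });
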
@@ -10186,6 +10205,25 @@ mod tests {
                        MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                        _ => panic!("expected BroadcastChannelUpdate event"),
                }
+
+               // If we provide a channel_id not associated with the peer, we should get an error and no updates
+               // should be applied to ensure update atomicity as specified in the API docs.
+               let bad_channel_id = [10; 32];
+               let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
+               let new_fee = current_fee + 100;
+               assert!(
+                       matches!(
+                               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
+                                       forwarding_fee_proportional_millionths: Some(new_fee),
+                                       ..Default::default()
+                               }),
+                               Err(APIError::ChannelUnavailable { err: _ }),
+                       )
+               );
+               // Check that the fee hasn't changed for the channel that exists.
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 0);
        }
 }