X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=75ca8b1376cfa67225461e975ce270ec8a963d4e;hb=a70b1f74ced56013c32df1a60b38a7d9269bd0ce;hp=7ec75e20fa36a6f9fdbb00f57a68fa6160d1dc88;hpb=550cf91439a2aa2befe89704e13c781800e8612e;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7ec75e20..75ca8b13 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -43,9 +43,9 @@ use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] -use crate::ln::features::InvoiceFeatures; +use crate::ln::features::Bolt11InvoiceFeatures; use crate::routing::gossip::NetworkGraph; -use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router}; +use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router}; use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use crate::ln::msgs; use crate::ln::onion_utils; @@ -685,7 +685,7 @@ impl PeerState { && self.in_flight_monitor_updates.is_empty() } - // Returns a count of all channels we have with this peer, including pending channels. + // Returns a count of all channels we have with this peer, including unfunded channels. fn total_channel_count(&self) -> usize { self.channel_by_id.len() + self.outbound_v1_channel_by_id.len() + @@ -1749,12 +1749,12 @@ macro_rules! convert_chan_err { }, } }; - ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => { + ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => { match $err { - // We should only ever have `ChannelError::Close` when prefunded channels error. + // We should only ever have `ChannelError::Close` when unfunded channels error. // In any case, just close the channel. ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => { - log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, &$channel_context); let shutdown_res = $channel_context.force_shutdown(false); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(), @@ -1784,7 +1784,7 @@ macro_rules! try_v1_outbound_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED); if drop { $entry.remove_entry(); } @@ -2027,6 +2027,8 @@ macro_rules! process_events_body { let mut pending_events = $self.pending_events.lock().unwrap(); pending_events.drain(..num_events); processed_all_events = pending_events.is_empty(); + // Note that `push_pending_forwards_ev` relies on `pending_events_processor` being + // updated here with the `pending_events` lock acquired. 
$self.pending_events_processor.store(false, Ordering::Release); } @@ -2244,6 +2246,7 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + // Only `Channels` in the channel_by_id map can be considered funded. for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) { let details = ChannelDetails::from_channel_context(&channel.context, best_block_height, peer_state.latest_features.clone(), &self.fee_estimator); @@ -2312,11 +2315,15 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let features = &peer_state.latest_features; + let chan_context_to_details = |context| { + ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator) + }; return peer_state.channel_by_id .iter() - .map(|(_, channel)| - ChannelDetails::from_channel_context(&channel.context, best_block_height, - features.clone(), &self.fee_estimator)) + .map(|(_, channel)| &channel.context) + .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context)) + .map(chan_context_to_details) .collect(); } vec![] @@ -2373,48 +2380,58 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let result: Result<(), _> = loop { - let per_peer_state = self.per_peer_state.read().unwrap(); + { + let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - let funding_txo_opt = chan_entry.get().context.get_funding_txo(); - let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() - .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; - failed_htlcs = htlcs; - - // We can send the `shutdown` message before updating the `ChannelMonitor` - // here as we don't need the monitor update to complete until we send a - // `shutdown_signed`, which we'll delay if we're pending a monitor update. - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg: shutdown_msg, - }); + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; - // Update the monitor with the shutdown script if necessary. 
- if let Some(monitor_update) = monitor_update_opt.take() { - break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); - } + match peer_state.channel_by_id.entry(channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); + let their_features = &peer_state.latest_features; + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut() + .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; + failed_htlcs = htlcs; - if chan_entry.get().is_shutdown() { - let channel = remove_channel!(self, chan_entry); - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); + // We can send the `shutdown` message before updating the `ChannelMonitor` + // here as we don't need the monitor update to complete until we send a + // `shutdown_signed`, which we'll delay if we're pending a monitor update. + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg: shutdown_msg, + }); + + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update_opt.take() { + break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ()); } - self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed); - } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) }) + + if chan_entry.get().is_shutdown() { + let channel = remove_channel!(self, chan_entry); + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); + } + self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed); + } + break Ok(()); + }, + hash_map::Entry::Vacant(_) => (), + } } + // If we reach this point, it means that the channel_id either refers to an unfunded channel or + // it does not exist for this peer. Either way, we can attempt to force-close it. + // + // An appropriate error will be returned for non-existence of the channel if that's the case. + return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ()) + // TODO(dunxen): This is still not ideal as we're doing some extra lookups. + // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422 }; for htlc_source in failed_htlcs.drain(..) 
{ @@ -2533,14 +2550,14 @@ where self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); - // Prefunded channel has no update + // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) { log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.issue_channel_close_events(&chan.get().context, closure_reason); let mut chan = remove_channel!(self, chan); self.finish_force_close_channel(chan.context.force_shutdown(false)); - // Prefunded channel has no update + // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) } else { return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); @@ -3203,6 +3220,7 @@ where /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a /// different route unless you intend to pay twice! /// + /// [`RouteHop`]: crate::routing::router::RouteHop /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs @@ -3520,27 +3538,48 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for channel_id in channel_ids { - if !peer_state.channel_by_id.contains_key(channel_id) { + if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id), }); - } + }; } for channel_id in channel_ids { - let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - let mut config = channel.context.config(); - config.apply(config_update); - if !channel.context.update_config(&config) { + if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) { + let mut config = channel.context.config(); + config.apply(config_update); + if !channel.context.update_config(&config) { + continue; + } + if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.context.get_counterparty_node_id(), + msg, + }); + } continue; } - if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); - } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.context.get_counterparty_node_id(), - msg, + + let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) { + &mut channel.context + } else { + // This should not be reachable as we've already checked for non-existence in the previous channel_id loop. 
+ debug_assert!(false); + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!", + log_bytes!(*channel_id), counterparty_node_id), }); - } + }; + let mut config = context.config(); + config.apply(config_update); + // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready` + // which would be the case for pending inbound/outbound channels. + context.update_config(&config); } Ok(()) } @@ -4255,13 +4294,19 @@ where PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = self.process_background_events(); - let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for (chan_id, chan) in peer_state.channel_by_id.iter_mut() { + let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + min_mempool_feerate + } else { + normal_feerate + }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } } @@ -4291,7 +4336,8 @@ where PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = self.process_background_events(); - let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); @@ -4304,6 +4350,11 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; let counterparty_node_id = *counterparty_node_id; peer_state.channel_by_id.retain(|chan_id, chan| { + let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + min_mempool_feerate + } else { + normal_feerate + }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -5793,16 +5844,21 @@ where } } - // We only want to push a PendingHTLCsForwardable event if no others are queued. fn push_pending_forwards_ev(&self) { let mut pending_events = self.pending_events.lock().unwrap(); - let forward_ev_exists = pending_events.iter() - .find(|(ev, _)| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }) - .is_some(); - if !forward_ev_exists { - pending_events.push_back((events::Event::PendingHTLCsForwardable { - time_forwardable: - Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), + let is_processing_events = self.pending_events_processor.load(Ordering::Acquire); + let num_forward_events = pending_events.iter().filter(|(ev, _)| + if let events::Event::PendingHTLCsForwardable { .. 
} = ev { true } else { false } + ).count(); + // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing + // events is done in batches and they are not removed until we're done processing each + // batch. Since handling a `PendingHTLCsForwardable` event will call back into the + // `ChannelManager`, we'll still see the original forwarding event not removed. Phantom + // payments will need an additional forwarding event before being claimed to make them look + // real by taking more time. + if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 { + pending_events.push_back((Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), }, None)); } } @@ -6920,13 +6976,13 @@ where provided_node_features(&self.default_configuration) } - /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by + /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by /// [`ChannelManager`]. /// /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice" /// or not. Thus, this method is not public. #[cfg(any(feature = "_test_utils", test))] - pub fn invoice_features(&self) -> InvoiceFeatures { + pub fn invoice_features(&self) -> Bolt11InvoiceFeatures { provided_invoice_features(&self.default_configuration) } @@ -7206,37 +7262,20 @@ where log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); let per_peer_state = self.per_peer_state.read().unwrap(); - for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.retain(|_, chan| { - let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id { - if !chan.context.have_received_message() { - // If we created this (outbound) channel while we were disconnected from the - // peer we probably failed to send the open_channel message, which is now - // lost. We can't have had anything pending related to this channel, so we just - // drop it. - false - } else { - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.context.get_counterparty_node_id(), - msg: chan.get_channel_reestablish(&self.logger), - }); - true - } - } else { true }; - if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id { - if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { - if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { - pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { - node_id: *counterparty_node_id, - msg, update_msg, - }); - } - } - } - retain + + // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted + // (so won't be recovered after a crash) we don't need to bother closing unfunded channels and + // clearing their maps here. Instead we can just send queue channel_reestablish messages for + // channels in the channel_by_id map. 
+			peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+				pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+					node_id: chan.context.get_counterparty_node_id(),
+					msg: chan.get_channel_reestablish(&self.logger),
+				});
 			});
 		}
 		//TODO: Also re-broadcast announcement_signatures
@@ -7270,7 +7309,7 @@ where
 			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
-				if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+				if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
 					peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 						node_id: *counterparty_node_id,
 						msg,
@@ -7358,13 +7397,13 @@ pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
 	provided_init_features(config).to_context()
 }

-/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
 /// [`ChannelManager`].
 ///
 /// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
 /// or not. Thus, this method is not public.
 #[cfg(any(feature = "_test_utils", test))]
-pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures {
+pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
 	provided_init_features(config).to_context()
 }

@@ -7536,7 +7575,7 @@ impl Readable for ChannelDetails {
 }

 impl_writeable_tlv_based!(PhantomRouteHints, {
-	(2, channels, vec_type),
+	(2, channels, required_vec),
 	(4, phantom_scid, required),
 	(6, real_node_pubkey, required),
 });
@@ -7728,7 +7767,7 @@ impl Readable for HTLCSource {
 			0 => {
 				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
 				let mut first_hop_htlc_msat: u64 = 0;
-				let mut path_hops: Option<Vec<RouteHop>> = Some(Vec::new());
+				let mut path_hops = Vec::new();
 				let mut payment_id = None;
 				let mut payment_params: Option<PaymentParameters> = None;
 				let mut blinded_tail: Option<BlindedTail> = None;
@@ -7736,7 +7775,7 @@ impl Readable for HTLCSource {
 					(0, session_priv, required),
 					(1, payment_id, option),
 					(2, first_hop_htlc_msat, required),
-					(4, path_hops, vec_type),
+					(4, path_hops, required_vec),
 					(5, payment_params, (option: ReadableArgs, 0)),
 					(6, blinded_tail, option),
 				});
@@ -7745,7 +7784,7 @@ impl Readable for HTLCSource {
 					// instead.
 					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
 				}
-				let path = Path { hops: path_hops.ok_or(DecodeError::InvalidValue)?, blinded_tail };
+				let path = Path { hops: path_hops, blinded_tail };
 				if path.hops.len() == 0 {
 					return Err(DecodeError::InvalidValue);
 				}
@@ -7780,7 +7819,7 @@ impl Writeable for HTLCSource {
 					(1, payment_id_opt, option),
 					(2, first_hop_htlc_msat, required),
 					// 3 was previously used to write a PaymentSecret for the payment.
-					(4, path.hops, vec_type),
+					(4, path.hops, required_vec),
 					(5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
 					(6, path.blinded_tail, option),
 				});
 			}
@@ -8030,7 +8069,7 @@ where
 			(6, monitor_update_blocked_actions_per_peer, option),
 			(7, self.fake_scid_rand_bytes, required),
 			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
-			(9, htlc_purposes, vec_type),
+			(9, htlc_purposes, required_vec),
 			(10, in_flight_monitor_updates, option),
 			(11, self.probing_cookie_secret, required),
 			(13, htlc_onion_fields, optional_vec),
@@ -8477,7 +8516,7 @@ where
 			(6, monitor_update_blocked_actions_per_peer, option),
 			(7, fake_scid_rand_bytes, option),
 			(8, events_override, option),
-			(9, claimable_htlc_purposes, vec_type),
+			(9, claimable_htlc_purposes, optional_vec),
 			(10, in_flight_monitor_updates, option),
 			(11, probing_cookie_secret, option),
 			(13, claimable_htlc_onion_fields, optional_vec),
@@ -10166,6 +10205,25 @@ mod tests {
 			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 			_ => panic!("expected BroadcastChannelUpdate event"),
 		}
+
+		// If we provide a channel_id not associated with the peer, we should get an error and no updates
+		// should be applied to ensure update atomicity as specified in the API docs.
+		let bad_channel_id = [10; 32];
+		let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
+		let new_fee = current_fee + 100;
+		assert!(
+			matches!(
+				nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
+					forwarding_fee_proportional_millionths: Some(new_fee),
+					..Default::default()
+				}),
+				Err(APIError::ChannelUnavailable { err: _ }),
+			)
+		);
+		// Check that the fee hasn't changed for the channel that exists.
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 0);
 	}
 }
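For context on the timer_tick_occurred feerate hunks earlier in this diff: channels negotiated with anchors_zero_fee_htlc_tx now take their background feerate from the new ConfirmationTarget::MempoolMinimum estimate, while all other channels keep targeting ConfirmationTarget::Normal. The sketch below restates that selection in isolation; ConfirmationTarget and ChannelType here are simplified stand-ins rather than the real lightning-crate types, and the CPFP rationale in the comments is the usual justification for the change, not text taken from this patch.

// Simplified, self-contained illustration of the per-channel-type feerate-target
// selection added above. These types are hypothetical stand-ins, not LDK's own.
#[derive(Debug, PartialEq)]
enum ConfirmationTarget { MempoolMinimum, Normal }

struct ChannelType { anchors_zero_fee_htlc_tx: bool }

// Anchor-output channels can have their commitment fee bumped (e.g. via CPFP on the
// anchor) at broadcast time, so their commitment feerate only needs to clear the
// mempool minimum; non-anchor channels keep aiming for a normal confirmation feerate.
fn feerate_target_for(channel_type: &ChannelType) -> ConfirmationTarget {
	if channel_type.anchors_zero_fee_htlc_tx {
		ConfirmationTarget::MempoolMinimum
	} else {
		ConfirmationTarget::Normal
	}
}

fn main() {
	assert_eq!(feerate_target_for(&ChannelType { anchors_zero_fee_htlc_tx: true }), ConfirmationTarget::MempoolMinimum);
	assert_eq!(feerate_target_for(&ChannelType { anchors_zero_fee_htlc_tx: false }), ConfirmationTarget::Normal);
}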