use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
-use crate::ln::features::InvoiceFeatures;
+use crate::ln::features::Bolt11InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
+use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
use crate::ln::msgs;
use crate::ln::onion_utils;
funding_txo: OutPoint,
update: ChannelMonitorUpdate
},
+ /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
+ /// them marked as pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending
+ /// on a channel.
+ MonitorUpdatesComplete {
+ counterparty_node_id: PublicKey,
+ channel_id: [u8; 32],
+ },
}
#[derive(Debug)]
&& self.in_flight_monitor_updates.is_empty()
}
- // Returns a count of all channels we have with this peer, including pending channels.
+ // Returns a count of all channels we have with this peer, including unfunded channels.
fn total_channel_count(&self) -> usize {
self.channel_by_id.len() +
self.outbound_v1_channel_by_id.len() +
},
}
};
- ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => {
+ ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
match $err {
- // We should only ever have `ChannelError::Close` when prefunded channels error.
+ // We should only ever have `ChannelError::Close` when unfunded channels error.
// In any case, just close the channel.
ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
- log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+ log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
update_maps_on_chan_removal!($self, &$channel_context);
let shutdown_res = $channel_context.force_shutdown(false);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED);
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
if drop {
$entry.remove_entry();
}
let mut pending_events = $self.pending_events.lock().unwrap();
pending_events.drain(..num_events);
processed_all_events = pending_events.is_empty();
+ // Note that `push_pending_forwards_ev` relies on `pending_events_processor` being
+ // updated here with the `pending_events` lock acquired.
$self.pending_events_processor.store(false, Ordering::Release);
}
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ // Only `Channels` in the channel_by_id map can be considered funded.
for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
peer_state.latest_features.clone(), &self.fee_estimator);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let features = &peer_state.latest_features;
+ let chan_context_to_details = |context| {
+ ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
+ };
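+ // Chain the contexts of funded channels with those of still-pending outbound/inbound V1 channels so all of this peer's channels are listed.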
return peer_state.channel_by_id
.iter()
- .map(|(_, channel)|
- ChannelDetails::from_channel_context(&channel.context, best_block_height,
- features.clone(), &self.fee_estimator))
+ .map(|(_, channel)| &channel.context)
+ .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+ .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+ .map(chan_context_to_details)
.collect();
}
vec![]
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
- let per_peer_state = self.per_peer_state.read().unwrap();
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex = per_peer_state.get(counterparty_node_id)
- .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let their_features = &peer_state.latest_features;
- let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
- .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
- failed_htlcs = htlcs;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg: shutdown_msg,
- });
+ match peer_state.channel_by_id.entry(channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+ let their_features = &peer_state.latest_features;
+ let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+ failed_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt.take() {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
- }
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg: shutdown_msg,
+ });
- if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, chan_entry);
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt.take() {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
}
- self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
- }
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
+
+ if chan_entry.get().is_shutdown() {
+ let channel = remove_channel!(self, chan_entry);
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
+ }
+ self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+ }
+ break Ok(());
+ },
+ hash_map::Entry::Vacant(_) => (),
+ }
}
+ // If we reach this point, it means that the channel_id either refers to an unfunded channel or
+ // it does not exist for this peer. Either way, we can attempt to force-close it.
+ //
+ // If the channel does not exist, an appropriate error will be returned.
+ return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+ // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
+ // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
};
for htlc_source in failed_htlcs.drain(..) {
self.issue_channel_close_events(&chan.get().context, closure_reason);
let mut chan = remove_channel!(self, chan);
self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Prefunded channel has no update
+ // Unfunded channel has no update
(None, chan.context.get_counterparty_node_id())
} else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
self.issue_channel_close_events(&chan.get().context, closure_reason);
let mut chan = remove_channel!(self, chan);
self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Prefunded channel has no update
+ // Unfunded channel has no update
(None, chan.context.get_counterparty_node_id())
} else {
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
/// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
/// different route unless you intend to pay twice!
///
+ /// [`RouteHop`]: crate::routing::router::RouteHop
/// [`Event::PaymentSent`]: events::Event::PaymentSent
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
Some(chan) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
- let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
let user_id = chan.context.get_user_id();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for channel_id in channel_ids {
- if !peer_state.channel_by_id.contains_key(channel_id) {
+ if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
});
- }
+ };
}
for channel_id in channel_ids {
- let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
- let mut config = channel.context.config();
- config.apply(config_update);
- if !channel.context.update_config(&config) {
+ if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
+ let mut config = channel.context.config();
+ config.apply(config_update);
+ if !channel.context.update_config(&config) {
+ continue;
+ }
+ if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg,
+ });
+ }
continue;
}
- if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
- } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.context.get_counterparty_node_id(),
- msg,
+
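+ // Not a funded channel, so it must be one of this peer's pending inbound/outbound V1 channels (we verified existence above).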
+ let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
+ &mut channel.context
+ } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
+ &mut channel.context
+ } else {
+ // This should not be reachable as the previous channel_id loop already returned an error for any channel that doesn't exist.
+ debug_assert!(false);
+ return Err(APIError::ChannelUnavailable {
+ err: format!(
+ "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
+ log_bytes!(*channel_id), counterparty_node_id),
});
- }
+ };
+ let mut config = context.config();
+ config.apply(config_update);
+ // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`,
+ // and these pending inbound/outbound channels have not yet reached that point.
+ context.update_config(&config);
}
Ok(())
}
}
let _ = handle_error!(self, res, counterparty_node_id);
},
+ BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+ handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+ } else {
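+ // The channel no longer exists, but any completion actions queued for its monitor updates still need to be run.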
+ let update_actions = peer_state.monitor_update_blocked_actions
+ .remove(&channel_id).unwrap_or(Vec::new());
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ self.handle_monitor_update_completion_actions(update_actions);
+ }
+ }
+ },
}
}
NotifyOption::DoPersist
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = self.process_background_events();
- let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
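+ // Anchor channels can have their commitment fee bumped later (e.g. via CPFP on the anchor outputs), so they only need to meet the mempool minimum feerate; other channels target the normal confirmation feerate.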
+ let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ min_mempool_feerate
+ } else {
+ normal_feerate
+ };
let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
}
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = self.process_background_events();
- let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
let pending_msg_events = &mut peer_state.pending_msg_events;
let counterparty_node_id = *counterparty_node_id;
peer_state.channel_by_id.retain(|chan_id, chan| {
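+ // Anchor channels only need to meet the mempool minimum feerate (their fees can be bumped later); other channels target the normal confirmation feerate.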
+ let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ min_mempool_feerate
+ } else {
+ normal_feerate
+ };
let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if peer_state_mutex_opt.is_none() { return }
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let mut channel = {
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
- hash_map::Entry::Occupied(chan) => chan,
- hash_map::Entry::Vacant(_) => return,
- }
- };
+ let channel =
+ if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+ chan
+ } else {
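+ // The channel is already gone (e.g. it was closed while the monitor update was in flight), but any monitor update completion actions queued for it should still be run.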
+ let update_actions = peer_state.monitor_update_blocked_actions
+ .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ self.handle_monitor_update_completion_actions(update_actions);
+ return;
+ };
let remaining_in_flight =
if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
pending.retain(|upd| upd.update_id > highest_applied_update_id);
pending.len()
} else { 0 };
log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
- highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(),
+ highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
remaining_in_flight);
- if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
+ if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
return;
}
- handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
+ handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel);
}
/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
-
- if !chan_entry.get().received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
- log_bytes!(msg.channel_id),
- if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
- }
+ // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
+ // https://github.com/lightningdevkit/rust-lightning/issues/2422
+ if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+ self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel!(self, chan_entry);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ return Ok(());
+ } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+ self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel!(self, chan_entry);
+ self.finish_force_close_channel(chan.context.force_shutdown(false));
+ return Ok(());
+ } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+ if !chan_entry.get().received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
- chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
- dropped_htlcs = htlcs;
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+ chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+ dropped_htlcs = htlcs;
- if let Some(msg) = shutdown {
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
+ if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
- }
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+ }
+ break Ok(());
+ } else {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
for htlc_source in dropped_htlcs.drain(..) {
}
}
- // We only want to push a PendingHTLCsForwardable event if no others are queued.
fn push_pending_forwards_ev(&self) {
let mut pending_events = self.pending_events.lock().unwrap();
- let forward_ev_exists = pending_events.iter()
- .find(|(ev, _)| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false })
- .is_some();
- if !forward_ev_exists {
- pending_events.push_back((events::Event::PendingHTLCsForwardable {
- time_forwardable:
- Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
+ let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
+ let num_forward_events = pending_events.iter().filter(|(ev, _)|
+ if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }
+ ).count();
+ // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
+ // events is done in batches and they are not removed until we're done processing each
+ // batch. Since handling a `PendingHTLCsForwardable` event will call back into the
+ // `ChannelManager`, we'll still see the original forwarding event not removed. Phantom
+ // payments will need an additional forwarding event before being claimed to make them look
+ // real by taking more time.
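+ // In other words: while events are being processed we tolerate the single forwarding event currently being handled; otherwise we only push when none are queued.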
+ if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
+ pending_events.push_back((Event::PendingHTLCsForwardable {
+ time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
}, None));
}
}
provided_node_features(&self.default_configuration)
}
- /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+ /// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
///
/// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
/// or not. Thus, this method is not public.
#[cfg(any(feature = "_test_utils", test))]
- pub fn invoice_features(&self) -> InvoiceFeatures {
+ pub fn invoice_features(&self) -> Bolt11InvoiceFeatures {
provided_invoice_features(&self.default_configuration)
}
log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
let per_peer_state = self.per_peer_state.read().unwrap();
- for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|_, chan| {
- let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
- if !chan.context.have_received_message() {
- // If we created this (outbound) channel while we were disconnected from the
- // peer we probably failed to send the open_channel message, which is now
- // lost. We can't have had anything pending related to this channel, so we just
- // drop it.
- false
- } else {
- pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&self.logger),
- });
- true
- }
- } else { true };
- if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
- if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
- if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
- pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
- node_id: *counterparty_node_id,
- msg, update_msg,
- });
- }
- }
- }
- retain
+
+ // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+ // (so won't be recovered after a crash), we don't need to bother closing unfunded channels and
+ // clearing their maps here. Instead we can just queue channel_reestablish messages for
+ // channels in the channel_by_id map.
+ peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_channel_reestablish(&self.logger),
+ });
});
}
//TODO: Also re-broadcast announcement_signatures
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: *counterparty_node_id,
msg,
provided_init_features(config).to_context()
}
-/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+/// Fetches the set of [`Bolt11InvoiceFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
///
/// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
/// or not. Thus, this method is not public.
#[cfg(any(feature = "_test_utils", test))]
-pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures {
+pub(crate) fn provided_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
provided_init_features(config).to_context()
}
}
impl_writeable_tlv_based!(PhantomRouteHints, {
- (2, channels, vec_type),
+ (2, channels, required_vec),
(4, phantom_scid, required),
(6, real_node_pubkey, required),
});
0 => {
let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
- let mut path_hops: Option<Vec<RouteHop>> = Some(Vec::new());
+ let mut path_hops = Vec::new();
let mut payment_id = None;
let mut payment_params: Option<PaymentParameters> = None;
let mut blinded_tail: Option<BlindedTail> = None;
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
- (4, path_hops, vec_type),
+ (4, path_hops, required_vec),
(5, payment_params, (option: ReadableArgs, 0)),
(6, blinded_tail, option),
});
// instead.
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
}
- let path = Path { hops: path_hops.ok_or(DecodeError::InvalidValue)?, blinded_tail };
+ let path = Path { hops: path_hops, blinded_tail };
if path.hops.len() == 0 {
return Err(DecodeError::InvalidValue);
}
(1, payment_id_opt, option),
(2, first_hop_htlc_msat, required),
// 3 was previously used to write a PaymentSecret for the payment.
- (4, path.hops, vec_type),
+ (4, path.hops, required_vec),
(5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
(6, path.blinded_tail, option),
});
(6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
- (9, htlc_purposes, vec_type),
+ (9, htlc_purposes, required_vec),
(10, in_flight_monitor_updates, option),
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
(6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
(8, events_override, option),
- (9, claimable_htlc_purposes, vec_type),
+ (9, claimable_htlc_purposes, optional_vec),
(10, in_flight_monitor_updates, option),
(11, probing_cookie_secret, option),
(13, claimable_htlc_onion_fields, optional_vec),
update: update.clone(),
});
}
+ if $chan_in_flight_upds.is_empty() {
+ // We had some updates to apply, but it turns out they had completed before we
+ // were serialized; we just weren't notified of that. Thus, we may have to run
+ // the completion actions for any monitor updates, but otherwise are done.
+ pending_background_events.push(
+ BackgroundEvent::MonitorUpdatesComplete {
+ counterparty_node_id: $counterparty_node_id,
+ channel_id: $funding_txo.to_channel_id(),
+ });
+ }
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
return Err(DecodeError::InvalidValue);
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
.entry(blocked_channel_outpoint.to_channel_id())
.or_insert_with(Vec::new).push(blocking_action.clone());
+ } else {
+ // If the channel we were blocking has closed, we don't need to
+ // worry about it - the blocked monitor update should never have
+ // been released from the `Channel` object so it can't have
+ // completed, and if the channel closed there's no reason to bother
+ // anymore.
}
}
}
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
+
+ // If we provide a channel_id not associated with the peer, we should get an error and no updates
+ // should be applied to ensure update atomicity as specified in the API docs.
+ let bad_channel_id = [10; 32];
+ let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
+ let new_fee = current_fee + 100;
+ assert!(
+ matches!(
+ nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
+ forwarding_fee_proportional_millionths: Some(new_fee),
+ ..Default::default()
+ }),
+ Err(APIError::ChannelUnavailable { err: _ }),
+ )
+ );
+ // Check that the fee hasn't changed for the channel that exists.
+ assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 0);
}
}