use crate::ln::features::InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
// Re-export this for use in the public API.
pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+use crate::ln::script::ShutdownScript;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
/// for some reason. They are handled in timer_tick_occurred, so may be processed with
/// quite some time lag.
enum BackgroundEvent {
- /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
- /// commitment transaction.
- ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+ /// Handle a ChannelMonitorUpdate.
+ ///
+ /// Note that any such events are lost on shutdown, so in general they must be updates which
+ /// are regenerated on startup.
+ MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
}
#[derive(Debug)]
(2, EmitEvent) => { (0, event, upgradable_required) },
);
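+/// An action to take after handing an [`Event`] to the user, currently used only to release
+/// a [`ChannelMonitorUpdate`] which was held back until the event (and thus the channel
+/// state changes it describes) has been handled.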
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) enum EventCompletionAction {
+ ReleaseRAAChannelMonitorUpdate {
+ counterparty_node_id: PublicKey,
+ channel_funding_outpoint: OutPoint,
+ },
+}
+impl_writeable_tlv_based_enum!(EventCompletionAction,
+ (0, ReleaseRAAChannelMonitorUpdate) => {
+ (0, channel_funding_outpoint, required),
+ (2, counterparty_node_id, required),
+ };
+);
+
/// State we hold per-peer.
pub(super) struct PeerState<Signer: ChannelSigner> {
/// `temporary_channel_id` or `channel_id` -> `channel`.
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
- Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+ Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+ ProbabilisticScoringFeeParameters,
+ ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>>,
Arc<L>
>;
/// of [`KeysManager`] and [`DefaultRouter`].
///
/// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
/// A trivial trait which describes any [`ChannelManager`] used in testing.
#[cfg(any(test, feature = "_test_utils"))]
#[cfg(any(test, feature = "_test_utils"))]
pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
+ /// The set of events which we need to give to the user to handle. In some cases an event may
+ /// require some further action after the user handles it (currently only blocking a monitor
+ /// update from being handed to the user to ensure the included changes to the channel state
+ /// are handled by the user before they're persisted durably to disk). In that case, the second
+ /// element in the tuple is set to `Some` with further details of the action.
+ ///
+ /// Note that events MUST NOT be removed from pending_events after deserialization, as they
+ /// could be in the middle of being processed without the direct mutex held.
+ ///
/// See `ChannelManager` struct-level documentation for lock order requirements.
- pending_events: Mutex<Vec<events::Event>>,
+ pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
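+ // Each queued entry pairs the event with its optional follow-up work, schematically:
+ //   (event, Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ //       counterparty_node_id, channel_funding_outpoint,
+ //   }))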
/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
pending_events_processor: AtomicBool,
/// See `ChannelManager` struct-level documentation for lock order requirements.
/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
/// to use a limit as close as possible to the HTLC limit we can currently send.
///
- /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+ /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+ /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
pub next_outbound_htlc_limit_msat: u64,
+ /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+ /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower bound, rather than
+ /// an upper bound. This is intended for use when routing, allowing us to ensure we pick a
+ /// route which is valid.
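+ /// In other words, a route hop over this channel can only carry an HTLC of `amt_msat` if
+ /// `next_outbound_htlc_minimum_msat <= amt_msat <= next_outbound_htlc_limit_msat`.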
+ pub next_outbound_htlc_minimum_msat: u64,
/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new inbound HTLCs).
inbound_capacity_msat: balance.inbound_capacity_msat,
outbound_capacity_msat: balance.outbound_capacity_msat,
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+ next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
user_channel_id: channel.get_user_id(),
confirmations_required: channel.minimum_depth(),
confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
/// Route hints used in constructing invoices for [phantom node payments].
///
-/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+/// [phantom node payments]: crate::sign::PhantomKeysManager
#[derive(Clone)]
pub struct PhantomRouteHints {
/// The list of channels to be included in the invoice route hints.
});
}
if let Some((channel_id, user_channel_id)) = chan_id {
- $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
+ $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
channel_id, user_channel_id,
reason: ClosureReason::ProcessingError { err: err.err.clone() }
- });
+ }, None));
}
}
macro_rules! emit_channel_pending_event {
($locked_events: expr, $channel: expr) => {
if $channel.should_emit_channel_pending_event() {
- $locked_events.push(events::Event::ChannelPending {
+ $locked_events.push_back((events::Event::ChannelPending {
channel_id: $channel.channel_id(),
former_temporary_channel_id: $channel.temporary_channel_id(),
counterparty_node_id: $channel.get_counterparty_node_id(),
user_channel_id: $channel.get_user_id(),
funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
- });
+ }, None));
$channel.set_channel_pending_event_emitted();
}
}
($locked_events: expr, $channel: expr) => {
if $channel.should_emit_channel_ready_event() {
debug_assert!($channel.channel_pending_event_emitted());
- $locked_events.push(events::Event::ChannelReady {
+ $locked_events.push_back((events::Event::ChannelReady {
channel_id: $channel.channel_id(),
user_channel_id: $channel.get_user_id(),
counterparty_node_id: $channel.get_counterparty_node_id(),
channel_type: $channel.get_channel_type().clone(),
- });
+ }, None));
$channel.set_channel_ready_event_emitted();
}
}
res
},
ChannelMonitorUpdateStatus::Completed => {
- if ($update_id == 0 || $chan.get_next_monitor_update()
- .expect("We can't be processing a monitor update if it isn't queued")
- .update_id == $update_id) &&
- $chan.get_latest_monitor_update_id() == $update_id
- {
+ $chan.complete_one_mon_update($update_id);
+ if $chan.no_monitor_updates_pending() {
handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
}
Ok(())
result = NotifyOption::DoPersist;
}
- for event in pending_events {
+ let mut post_event_actions = Vec::new();
+
+ for (event, action_opt) in pending_events {
$event_to_handle = event;
$handle_event;
+ if let Some(action) = action_opt {
+ post_event_actions.push(action);
+ }
}
{
$self.pending_events_processor.store(false, Ordering::Release);
}
+ if !post_event_actions.is_empty() {
+ $self.handle_post_event_actions(post_event_actions);
+ // If we had some actions, go around again as we may have more events now
+ processed_all_events = false;
+ }
+
if result == NotifyOption::DoPersist {
$self.persistence_notifier.notify();
}
per_peer_state: FairRwLock::new(HashMap::new()),
- pending_events: Mutex::new(Vec::new()),
+ pending_events: Mutex::new(VecDeque::new()),
pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
let mut pending_events_lock = self.pending_events.lock().unwrap();
match channel.unbroadcasted_funding() {
Some(transaction) => {
- pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
+ pending_events_lock.push_back((events::Event::DiscardFunding {
+ channel_id: channel.channel_id(), transaction
+ }, None));
},
None => {},
}
- pending_events_lock.push(events::Event::ChannelClosed {
+ pending_events_lock.push_back((events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
reason: closure_reason
- });
+ }, None));
}
- fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
+ fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let funding_txo_opt = chan_entry.get().get_funding_txo();
let their_features = &peer_state.latest_features;
let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
- .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
// We can send the `shutdown` message before updating the `ChannelMonitor`
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, counterparty_node_id, None)
+ self.close_channel_internal(channel_id, counterparty_node_id, None, None)
}
/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
/// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
/// will appear on a force-closure transaction, whichever is lower).
///
+ /// The `shutdown_script` provided will be used as the `scriptPubKey` for the closing transaction.
+ /// Will fail if a shutdown script has already been set for this channel by
+ /// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]. The given shutdown script must
+ /// also be compatible with our and the counterparty's features.
+ ///
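+ /// For example, assuming a `channel_manager`, `channel_id`, `counterparty_node_id`, and a
+ /// compatible `shutdown_script` are in scope (a sketch, not a compile-tested doc-test):
+ ///
+ /// ```ignore
+ /// channel_manager.close_channel_with_feerate_and_script(
+ ///     &channel_id, &counterparty_node_id, Some(253), Some(shutdown_script))?;
+ /// ```
+ ///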
/// May generate a [`SendShutdown`] message event on success, which should be relayed.
///
/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
- pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
+ pub fn close_channel_with_feerate_and_script(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
#[inline]
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
- if onion_utils::route_size_insane(&onion_payloads) {
- return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
- }
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
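+ // `construct_onion_packet` now fails internally if the serialized onion payloads would
+ // overflow the onion, replacing the explicit `route_size_insane` pre-check removed above.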
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+ .map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
}
}
self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
+ if tx.output.len() > u16::max_value() as usize {
+ return Err(APIError::APIMisuseError {
+ err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+ });
+ }
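+ // (Checked once up front since `output_index` below is a u16: with more than
+ // 2^16 - 1 outputs, a matching output could have an unrepresentable index.)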
+
let mut output_index = None;
let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
for (idx, outp) in tx.output.iter().enumerate() {
err: "Multiple outputs matched the expected script and value".to_owned()
});
}
- if idx > u16::max_value() as usize {
- return Err(APIError::APIMisuseError {
- err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
- });
- }
output_index = Some(idx as u16);
}
}
pub fn process_pending_htlc_forwards(&self) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut new_events = Vec::new();
+ let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
htlcs.push(claimable_htlc);
let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
- new_events.push(events::Event::PaymentClaimable {
+ new_events.push_back((events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
purpose: purpose(),
via_user_channel_id: Some(prev_user_channel_id),
claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
onion_fields: claimable_payment.onion_fields.clone(),
- });
+ }, None));
payment_claimable_generated = true;
} else {
// Nothing to do - we haven't reached the total
htlcs: vec![claimable_htlc],
});
let prev_channel_id = prev_funding_outpoint.to_channel_id();
- new_events.push(events::Event::PaymentClaimable {
+ new_events.push_back((events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
amount_msat,
via_user_channel_id: Some(prev_user_channel_id),
claim_deadline,
onion_fields: Some(onion_fields),
- });
+ }, None));
},
hash_map::Entry::Occupied(_) => {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
for event in background_events.drain(..) {
match event {
- BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
// The channel has already been closed, so there is no need to care whether the
// monitor update completes.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
mem::drop(forward_htlcs);
if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::HTLCHandlingFailed {
+ pending_events.push_back((events::Event::HTLCHandlingFailed {
prev_channel_id: outpoint.to_channel_id(),
failed_next_destination: destination,
- });
+ }, None));
},
}
}
MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
- self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+ self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
- });
+ }, None));
}
},
MonitorUpdateCompletionAction::EmitEvent { event } => {
- self.pending_events.lock().unwrap().push(event);
+ self.pending_events.lock().unwrap().push_back((event, None));
},
}
}
if let Some(tx) = funding_broadcastable {
log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- self.tx_broadcaster.broadcast_transaction(&tx);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
}
{
});
} else {
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(
- events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
- counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
- push_msat: msg.push_msat,
- channel_type: channel.get_channel_type().clone(),
- }
- );
+ pending_events.push_back((events::Event::OpenChannelRequest {
+ temporary_channel_id: msg.temporary_channel_id.clone(),
+ counterparty_node_id: counterparty_node_id.clone(),
+ funding_satoshis: msg.funding_satoshis,
+ push_msat: msg.push_msat,
+ channel_type: channel.get_channel_type().clone(),
+ }, None));
}
entry.insert(channel);
}
};
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::FundingGenerationReady {
+ pending_events.push_back((events::Event::FundingGenerationReady {
temporary_channel_id: msg.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
user_channel_id: user_id,
- });
+ }, None));
Ok(())
}
};
if let Some(broadcast_tx) = tx {
log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
- self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+ self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let funding_txo = chan.get().get_funding_txo();
- let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
- let update_id = monitor_update.update_id;
- handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, per_peer_state, chan)
+ let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
+ if let Some(monitor_update) = monitor_update_opt {
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan)
+ } else { Ok(()) }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut push_forward_event = false;
- let mut new_intercept_events = Vec::new();
+ let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
match pending_intercepts.entry(intercept_id) {
hash_map::Entry::Vacant(entry) => {
- new_intercept_events.push(events::Event::HTLCIntercepted {
+ new_intercept_events.push_back((events::Event::HTLCIntercepted {
requested_next_hop_scid: scid,
payment_hash: forward_info.payment_hash,
inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
intercept_id
- });
+ }, None));
entry.insert(PendingAddHTLCInfo {
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
},
fn push_pending_forwards_ev(&self) {
let mut pending_events = self.pending_events.lock().unwrap();
let forward_ev_exists = pending_events.iter()
- .find(|ev| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false })
+ .find(|(ev, _)| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false })
.is_some();
if !forward_ev_exists {
- pending_events.push(events::Event::PendingHTLCsForwardable {
+ pending_events.push_back((events::Event::PendingHTLCsForwardable {
time_forwardable:
Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
- });
+ }, None));
}
}
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let funding_txo = chan.get().get_funding_txo();
- let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
- let update_id = monitor_update.update_id;
- let res = handle_new_monitor_update!(self, update_res, update_id,
- peer_state_lock, peer_state, per_peer_state, chan);
+ let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+ let res = if let Some(monitor_update) = monitor_update_opt {
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_id = monitor_update.update_id;
+ handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lock, peer_state, per_peer_state, chan)
+ } else { Ok(()) };
(htlcs_to_fail, res)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
- self.tx_broadcaster.broadcast_transaction(&tx);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, chan);
false
} else { true }
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
} else { unreachable!(); }
- self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+ self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
}
self.finish_force_close_channel(failure);
}
/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
/// are used when constructing the phantom invoice's route hints.
///
- /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ /// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
/// Gets route hints for use in receiving [phantom node payments].
///
- /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
+ /// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
PhantomRouteHints {
channels: self.list_usable_channels(),
#[cfg(feature = "_test_utils")]
pub fn push_pending_event(&self, event: events::Event) {
let mut events = self.pending_events.lock().unwrap();
- events.push(event);
+ events.push_back((event, None));
}
#[cfg(test)]
pub fn pop_pending_event(&self) -> Option<events::Event> {
let mut events = self.pending_events.lock().unwrap();
- if events.is_empty() { None } else { Some(events.remove(0)) }
+ events.pop_front().map(|(e, _)| e)
}
#[cfg(test)]
self.pending_outbound_payments.clear_pending_payments()
}
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint) {
+ let mut errors = Vec::new();
+ loop {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+ let peer_state = &mut *peer_state_lck;
+ if self.pending_events.lock().unwrap().iter()
+ .any(|(_ev, action_opt)| action_opt == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint, counterparty_node_id
+ }))
+ {
+ // Check that, while holding the peer lock, we don't have another event
+ // blocking any monitor updates for this channel. If we do, let those
+ // events be the ones that ultimately release the monitor update(s).
+ log_trace!(self.logger, "Delaying monitor unlock for channel {} as another event is pending",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ break;
+ }
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
+ if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
+ log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, monitor_update);
+ let update_id = monitor_update.update_id;
+ if let Err(e) = handle_new_monitor_update!(self, update_res, update_id,
+ peer_state_lck, peer_state, per_peer_state, chan)
+ {
+ errors.push((e, counterparty_node_id));
+ }
+ if further_update_exists {
+ // If there are more `ChannelMonitorUpdate`s to process, restart at the
+ // top of the loop.
+ continue;
+ }
+ } else {
+ log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+ log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+ }
+ }
+ } else {
+ log_debug!(self.logger,
+ "Got a release post-RAA monitor update for peer {} but the channel is gone",
+ log_pubkey!(counterparty_node_id));
+ }
+ break;
+ }
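+ // Surface any errors only now, once the per-peer locks taken above have been released, as
+ // `handle_error!` may itself need to take per-peer locks.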
+ for (err, counterparty_node_id) in errors {
+ let res = Err::<(), _>(err);
+ let _ = handle_error!(self, res, counterparty_node_id);
+ }
+ }
+
+ fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
+ for action in actions {
+ match action {
+ EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+ channel_funding_outpoint, counterparty_node_id
+ } => {
+ self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint);
+ }
+ }
+ }
+ }
+
/// Processes any events asynchronously in the order they were generated since the last call
/// using the given event handler.
///
let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.temporary_channel_id.clone())), *counterparty_node_id);
+ }
+
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.temporary_channel_id.clone())), *counterparty_node_id);
+ }
+
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
});
pending_msg_events.retain(|msg| {
match msg {
+ // V1 Channel Establishment
&events::MessageSendEvent::SendAcceptChannel { .. } => false,
&events::MessageSendEvent::SendOpenChannel { .. } => false,
&events::MessageSendEvent::SendFundingCreated { .. } => false,
&events::MessageSendEvent::SendFundingSigned { .. } => false,
+ // V2 Channel Establishment
+ &events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
+ &events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
+ // Common Channel Establishment
&events::MessageSendEvent::SendChannelReady { .. } => false,
&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+ // Interactive Transaction Construction
+ &events::MessageSendEvent::SendTxAddInput { .. } => false,
+ &events::MessageSendEvent::SendTxAddOutput { .. } => false,
+ &events::MessageSendEvent::SendTxRemoveInput { .. } => false,
+ &events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
+ &events::MessageSendEvent::SendTxComplete { .. } => false,
+ &events::MessageSendEvent::SendTxSignatures { .. } => false,
+ &events::MessageSendEvent::SendTxInitRbf { .. } => false,
+ &events::MessageSendEvent::SendTxAckRbf { .. } => false,
+ &events::MessageSendEvent::SendTxAbort { .. } => false,
+ // Channel Operations
&events::MessageSendEvent::UpdateHTLCs { .. } => false,
&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
&events::MessageSendEvent::SendClosingSigned { .. } => false,
&events::MessageSendEvent::SendShutdown { .. } => false,
&events::MessageSendEvent::SendChannelReestablish { .. } => false,
+ &events::MessageSendEvent::HandleError { .. } => false,
+ // Gossip
&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { .. } => false,
- &events::MessageSendEvent::HandleError { .. } => false,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
provided_init_features(&self.default_configuration)
}
+
+ fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_add_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddOutput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_remove_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveInput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_remove_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveOutput) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_complete(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxComplete) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_init_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxInitRbf) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_ack_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAckRbf) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
+
+ fn handle_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) {
+ let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Dual-funded channels not supported".to_owned(),
+ msg.channel_id.clone())), *counterparty_node_id);
+ }
}
/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
// should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
// [`ErroringMessageHandler`].
let mut features = InitFeatures::empty();
- features.set_data_loss_protect_optional();
+ features.set_data_loss_protect_required();
features.set_upfront_shutdown_script_optional();
features.set_variable_length_onion_required();
features.set_static_remote_key_required();
(14, user_channel_id_low, required),
(16, self.balance_msat, required),
(18, self.outbound_capacity_msat, required),
- // Note that by the time we get past the required read above, outbound_capacity_msat will be
- // filled in, so we can safely unwrap it here.
- (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+ (19, self.next_outbound_htlc_limit_msat, required),
(20, self.inbound_capacity_msat, required),
+ (21, self.next_outbound_htlc_minimum_msat, required),
(22, self.confirmations_required, option),
(24, self.force_close_spend_delay, option),
(26, self.is_outbound, required),
// filled in, so we can safely unwrap it here.
(19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
(20, inbound_capacity_msat, required),
+ (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
(22, confirmations_required, option),
(24, force_close_spend_delay, option),
(26, is_outbound, required),
balance_msat: balance_msat.0.unwrap(),
outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+ next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
confirmations_required,
confirmations,
}
let events = self.pending_events.lock().unwrap();
- (events.len() as u64).write(writer)?;
- for event in events.iter() {
- event.write(writer)?;
- }
-
- let background_events = self.pending_background_events.lock().unwrap();
- (background_events.len() as u64).write(writer)?;
- for event in background_events.iter() {
- match event {
- BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
- 0u8.write(writer)?;
- funding_txo.write(writer)?;
- monitor_update.write(writer)?;
- },
+ // LDK versions prior to 0.0.115 don't support post-event actions, thus if there are no
+ // actions at all, skip writing the required TLV. Otherwise, pre-0.0.115 versions will
+ // refuse to read the new ChannelManager.
+ let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
+ if events_not_backwards_compatible {
+ // If we're going to write an even TLV that will overwrite our events anyway, we might
+ // as well save the space and not write any events here.
+ 0u64.write(writer)?;
+ } else {
+ (events.len() as u64).write(writer)?;
+ for (event, _) in events.iter() {
+ event.write(writer)?;
}
}
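+ // Note that the `events` TLV below uses an even type (8): even TLV types are required
+ // reads, so an older deserializer which does not understand it will refuse to continue,
+ // which is exactly what we want for pre-0.0.115 versions when actions are present.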
+ // LDK versions prior to 0.0.116 wrote the `pending_background_events`
+ // `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
+ // the closing monitor updates were always effectively replayed on startup (either directly
+ // by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+ // deserialization or, in 0.0.115, by regenerating the monitor update itself).
+ 0u64.write(writer)?;
+
// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
// likely to be identical.
(5, self.our_network_pubkey, required),
(6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
+ (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
(9, htlc_purposes, vec_type),
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
}
}
+impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ (self.len() as u64).write(w)?;
+ for (event, action) in self.iter() {
+ event.write(w)?;
+ action.write(w)?;
+ #[cfg(debug_assertions)] {
+ // Events are MaybeReadable, which in some cases indicates they shouldn't actually
+ // be persisted but instead regenerated on restart. However, if such an event has a
+ // post-event-handling action we'll write nothing for the event and would have to
+ // either forget the action or fail on deserialization (which we do below). Thus,
+ // check that the event is sane here.
+ let event_encoded = event.encode();
+ let event_read: Option<Event> =
+ MaybeReadable::read(&mut &event_encoded[..]).unwrap();
+ if action.is_some() { assert!(event_read.is_some()); }
+ }
+ }
+ Ok(())
+ }
+}
+impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let len: u64 = Readable::read(reader)?;
+ const MAX_ALLOC_SIZE: u64 = 1024 * 16;
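+ // `len` comes from untrusted input, so only pre-allocate up to MAX_ALLOC_SIZE bytes' worth
+ // of entries; all `len` entries are still read below, growing the buffer as needed.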
+ let mut events: Self = VecDeque::with_capacity(cmp::min(
+ MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
+ len) as usize);
+ for _ in 0..len {
+ let ev_opt = MaybeReadable::read(reader)?;
+ let action = Readable::read(reader)?;
+ if let Some(ev) = ev_opt {
+ events.push_back((ev, action));
+ } else if action.is_some() {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ Ok(events)
+ }
+}
+
/// Arguments for the creation of a ChannelManager that are not deserialized.
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut channel_closures = Vec::new();
+ let mut channel_closures = VecDeque::new();
let mut pending_background_events = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
- if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
- channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
- channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
- channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
+ if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
// If the channel is ahead of the monitor, return InvalidValue:
log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
- log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
if let Some(monitor_update) = monitor_update {
- pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+ pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
}
failed_htlcs.append(&mut new_failed_htlcs);
- channel_closures.push(events::Event::ChannelClosed {
+ channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
reason: ClosureReason::OutdatedChannelManager
- });
+ }, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
// was in-progress, we never broadcasted the funding transaction and can still
// safely discard the channel.
let _ = channel.force_shutdown(false);
- channel_closures.push(events::Event::ChannelClosed {
+ channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.channel_id(),
user_channel_id: channel.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
- });
+ }, None));
} else {
log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
for (funding_txo, _) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
+ log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
+ log_bytes!(funding_txo.to_channel_id()));
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
};
- pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
+ pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
}
}
}
let event_count: u64 = Readable::read(reader)?;
- let mut pending_events_read: Vec<events::Event> = Vec::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<events::Event>()));
+ let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
+ VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
for _ in 0..event_count {
match MaybeReadable::read(reader)? {
- Some(event) => pending_events_read.push(event),
+ Some(event) => pending_events_read.push_back((event, None)),
None => continue,
}
}
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
0 => {
- let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
- if pending_background_events.iter().find(|e| {
- let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
- *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
- }).is_none() {
- pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
- }
+ // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
+ // however we really don't (and never did) need them - we regenerate all
+ // on-startup monitor updates.
+ let _: OutPoint = Readable::read(reader)?;
+ let _: ChannelMonitorUpdate = Readable::read(reader)?;
}
_ => return Err(DecodeError::InvalidValue),
}
let mut claimable_htlc_onion_fields = None;
let mut pending_claiming_payments = Some(HashMap::new());
let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+ let mut events_override = None;
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(5, received_network_pubkey, option),
(6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
+ (8, events_override, option),
(9, claimable_htlc_purposes, vec_type),
(11, probing_cookie_secret, option),
(13, claimable_htlc_onion_fields, optional_vec),
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
+ if let Some(events) = events_override {
+ pending_events_read = events;
+ }
+
if !channel_closures.is_empty() {
pending_events_read.append(&mut channel_closures);
}
if pending_forward_matches_htlc(&htlc_info) {
log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
- pending_events_read.retain(|event| {
+ pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
} else { true }
// shut down before the timer hit. Either way, set the time_forwardable to a small
// constant as enough time has likely passed that we should simply handle the forwards
// now, or at least after the user gets a chance to reconnect to our peers.
- pending_events_read.push(events::Event::PendingHTLCsForwardable {
+ pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
time_forwardable: Duration::from_secs(2),
- });
+ }, None));
}
let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
}
}
- pending_events_read.push(events::Event::PaymentClaimed {
+ pending_events_read.push_back((events::Event::PaymentClaimed {
receiver_node_id,
payment_hash,
purpose: payment.purpose,
amount_msat: claimable_amt_msat,
- });
+ }, None));
}
}
}
use crate::util::errors::APIError;
use crate::util::test_utils;
use crate::util::config::ChannelConfig;
- use crate::chain::keysinterface::EntropySource;
+ use crate::sign::EntropySource;
#[test]
fn test_notify_limits() {
};
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
}
}
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
pub mod bench {
use crate::chain::Listen;
use crate::chain::chainmonitor::{ChainMonitor, Persist};
- use crate::chain::keysinterface::{KeysManager, InMemorySigner};
+ use crate::sign::{KeysManager, InMemorySigner};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
use crate::ln::functional_test_utils::*;
use crate::sync::{Arc, Mutex};
- use test::Bencher;
+ use criterion::Criterion;
type Manager<'a, P> = ChannelManager<
&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
}
- #[cfg(test)]
- #[bench]
- fn bench_sends(bench: &mut Bencher) {
- bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+ pub fn bench_sends(bench: &mut Criterion) {
+ bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
}
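+ // With criterion this is typically driven from a bench target, wired up roughly as follows
+ // (a sketch; `benches` is an arbitrary group name):
+ //
+ //   criterion_group!(benches, bench_sends);
+ //   criterion_main!(benches);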
- pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+ pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
// Do a simple benchmark of sending a payment back and forth between two nodes.
// Note that this is unrealistic as each payment send will require at least two fsync
// calls per node.
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
- let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![tx],
- };
+ let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);
}
}
- bench.iter(|| {
+ bench.bench_function(bench_name, |b| b.iter(|| {
send_payment!(node_a, node_b);
send_payment!(node_b, node_a);
- });
+ }));
}
}