use crate::ln::inbound_payment;
use crate::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
-pub use crate::ln::channel_state::{ChannelCounterparty, ChannelDetails, ChannelShutdownState, CounterpartyForwardingInfo};
-pub use crate::ln::channel_state::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
+use crate::ln::channel_state::ChannelDetails;
use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
#[cfg(test)]
use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
use crate::ln::wire::Encode;
use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
use crate::offers::invoice_error::InvoiceError;
use core::ops::Deref;
// Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+pub use crate::ln::outbound_payment::{Bolt12PaymentError, PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
use crate::ln::script::ShutdownScript;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
err: msg,
action: msgs::ErrorAction::IgnoreError,
},
- ChannelError::Close(msg) => LightningError {
+ ChannelError::Close((msg, _reason)) => LightningError {
err: msg.clone(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
/// accepted. An unaccepted channel that exceeds this limit will be abandoned.
const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
+/// The number of blocks of historical feerate estimates we keep around and consider when deciding
+/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
+/// after startup before we consider force-closing channels for having too-low fees.
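+/// At Bitcoin's average block interval of roughly ten minutes, 144 blocks corresponds to about
+/// one day.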
+pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
+
/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
/// actually ours and not some duplicate HTLC sent to us by a node along the route.
///
/// #
/// # fn example<T: AChannelManager>(channel_manager: T) {
/// # let channel_manager = channel_manager.get_cm();
+/// # let error_message = "Channel force-closed";
/// channel_manager.process_pending_events(&|event| match event {
/// Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
/// if !is_trusted(counterparty_node_id) {
/// match channel_manager.force_close_without_broadcasting_txn(
-/// &temporary_channel_id, &counterparty_node_id
+/// &temporary_channel_id, &counterparty_node_id, error_message.to_string()
/// ) {
/// Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
/// Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
/// #
/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
/// # let channel_manager = channel_manager.get_cm();
+/// # let absolute_expiry = None;
/// let offer = channel_manager
-/// .create_offer_builder()?
+/// .create_offer_builder(absolute_expiry)?
/// # ;
/// # // Needed for compiling for c_bindings
/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
/// Tracks the message events that are to be broadcasted when we are connected to some peer.
pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
+ /// We only want to force-close our channels on peers based on stale feerates when we're
+ /// confident the feerate on the channel is *really* stale, not merely one that recently
+ /// became stale.
+ /// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks
+ /// (after startup completed) here, and only force-close when channels have a lower feerate
+ /// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
+ ///
+ /// We only keep this in memory as we assume any feerates we receive immediately after startup
+ /// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
+ /// actions for a day anyway.
+ ///
+ /// The first element in the pair is the
+ /// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
+ /// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
+ last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
+
entropy_source: ES,
node_signer: NS,
signer_provider: SP,
/// many peers we reject new (inbound) connections.
const MAX_NO_CHANNEL_PEERS: usize = 250;
+/// The maximum expiration from the current time at which an [`Offer`] or [`Refund`] is considered
+/// short-lived; anything with a greater expiration is considered long-lived.
+///
+/// Offers and refunds created via [`ChannelManager::create_offer_builder`] or
+/// [`ChannelManager::create_refund_builder`] will include a [`BlindedPath`] created using:
+/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and
+/// - [`MessageRouter::create_blinded_paths`] when long-lived.
+///
+/// Using compact [`BlindedPath`]s may provide better privacy as the [`MessageRouter`] could select
+/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to
+/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use.
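+///
+/// For example, with this constant set to one day, an [`Offer`] expiring twelve hours from now
+/// would include a compact [`BlindedPath`], while one expiring in a week (or without any expiry)
+/// would not.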
+pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
+
/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
#[derive(Debug, PartialEq)]
ChannelError::Ignore(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
},
- ChannelError::Close(msg) => {
+ ChannelError::Close((msg, reason)) => {
let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
- let reason = ClosureReason::ProcessingError { err: msg.clone() };
let shutdown_res = $channel.context.force_shutdown(true, reason);
let err =
MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
pending_offers_messages: Mutex::new(Vec::new()),
pending_broadcast_messages: Mutex::new(Vec::new()),
+ last_days_feerates: Mutex::new(VecDeque::new()),
+
entropy_source,
node_signer,
signer_provider,
}
} else {
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
- shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
}
},
hash_map::Entry::Vacant(_) => {
let closure_reason = if let Some(peer_msg) = peer_msg {
ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
} else {
- ClosureReason::HolderForceClosed
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
};
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
Ok(counterparty_node_id)
}
- fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
+ fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
+ -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ log_debug!(self.logger,
+ "Force-closing channel; error message sent to the peer: {}", error_message);
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
let per_peer_state = self.per_peer_state.read().unwrap();
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
- action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
},
}
);
}
}
- /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
- /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
- /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
- /// channel.
- pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+ /// Force closes a channel, immediately broadcasting the latest local transaction(s),
+ /// rejecting new HTLCs.
+ ///
+ /// The provided `error_message` is sent to connected peers when closing channels and should
+ /// be a human-readable description of what went wrong.
+ ///
+ /// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
+ /// isn't the counterparty of the corresponding channel.
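+ ///
+ /// A minimal usage sketch; the `channel_id` and `counterparty_node_id` here are assumed to
+ /// identify an existing channel:
+ ///
+ /// ```ignore
+ /// # use bitcoin::secp256k1::PublicKey;
+ /// # use lightning::ln::channelmanager::AChannelManager;
+ /// # use lightning::ln::types::ChannelId;
+ /// # fn example<T: AChannelManager>(channel_manager: T, channel_id: &ChannelId, counterparty_node_id: &PublicKey) {
+ /// # let channel_manager = channel_manager.get_cm();
+ /// // The error message is delivered to the peer in the `error` message sent on force-close.
+ /// let error_message = "Channel force-closed".to_string();
+ /// if let Err(e) = channel_manager.force_close_broadcasting_latest_txn(
+ ///     channel_id, counterparty_node_id, error_message
+ /// ) {
+ ///     println!("Error force-closing channel {}: {:?}", channel_id, e);
+ /// }
+ /// # }
+ /// ```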
+ pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
-> Result<(), APIError> {
- self.force_close_sending_error(channel_id, counterparty_node_id, true)
+ self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
}
/// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
- /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
- /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+ /// the latest local transaction(s).
+ ///
+ /// The provided `error_message` is sent to connected peers when closing channels and should
+ /// be a human-readable description of what went wrong.
///
+ /// Fails if `channel_id` is unknown to the manager, or if the
+ /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+ ///
/// You can always broadcast the latest local transaction(s) via
/// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
- pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+ pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
-> Result<(), APIError> {
- self.force_close_sending_error(channel_id, counterparty_node_id, false)
+ self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
}
/// Force close all channels, immediately broadcasting the latest local commitment transaction
/// for each to the chain and rejecting new HTLCs on each.
- pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+ ///
+ /// The provided `error_message` is sent to connected peers when closing channels and should
+ /// be a human-readable description of what went wrong.
+ pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
for chan in self.list_channels() {
- let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+ let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
}
}
/// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
/// local transaction(s).
- pub fn force_close_all_channels_without_broadcasting_txn(&self) {
+ ///
+ /// The provided `error_message` is sent to connected peers when closing channels and
+ /// should be a human-readable description of what went wrong.
+ pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
for chan in self.list_channels() {
- let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
+ let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
}
}
self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
}
- pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
+ /// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`.
+ ///
+ /// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested
+ /// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this
+ /// fails or if the encoded `payment_id` is not recognized. The latter may happen once the
+ /// payment is no longer tracked, which occurs when the payment is attempted after:
+ /// - an invoice for the `payment_id` was already paid,
+ /// - one full [timer tick] has elapsed since initially requesting the invoice when paying an
+ /// offer, or
+ /// - the refund corresponding to the invoice has already expired.
+ ///
+ /// To retry the payment, request another invoice using a new `payment_id`.
+ ///
+ /// Attempting to pay the same invoice twice while the first payment is still pending will
+ /// result in a [`Bolt12PaymentError::DuplicateInvoice`].
+ ///
+ /// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] is used to indicate
+ /// whether or not the payment was successful.
+ ///
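+ /// # Example
+ ///
+ /// A minimal sketch; the `invoice` here is assumed to have been received via
+ /// [`Event::InvoiceReceived`], which requires the `manually_handle_bolt12_invoices` config flag:
+ ///
+ /// ```ignore
+ /// # use lightning::ln::channelmanager::AChannelManager;
+ /// # use lightning::offers::invoice::Bolt12Invoice;
+ /// # fn example<T: AChannelManager>(channel_manager: T, invoice: &Bolt12Invoice) {
+ /// # let channel_manager = channel_manager.get_cm();
+ /// match channel_manager.send_payment_for_bolt12_invoice(invoice) {
+ ///     Ok(()) => println!("Paying invoice"),
+ ///     Err(e) => println!("Failed paying invoice: {:?}", e),
+ /// }
+ /// # }
+ /// ```
+ ///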
+ /// [timer tick]: Self::timer_tick_occurred
+ pub fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice) -> Result<(), Bolt12PaymentError> {
+ let secp_ctx = &self.secp_ctx;
+ let expanded_key = &self.inbound_payment_key;
+ match invoice.verify(expanded_key, secp_ctx) {
+ Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
+ Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
+ }
+ }
+
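+ /// Pays a [`Bolt12Invoice`] whose `payer_metadata` has already been verified, using the
+ /// `payment_id` recovered during verification.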
+ fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
invoice, payment_id, &self.router, self.list_usable_channels(),
- || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
- best_block_height, &self.logger, &self.pending_events,
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+ &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
|args| self.send_payment_along_path(args)
)
}
Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
let counterparty;
- let err = if let ChannelError::Close(msg) = $err {
+ let err = if let ChannelError::Close((msg, reason)) = $err {
let channel_id = $chan.context.channel_id();
counterparty = chan.context.get_counterparty_node_id();
- let reason = ClosureReason::ProcessingError { err: msg.clone() };
let shutdown_res = $chan.context.force_shutdown(false, reason);
MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
} else { unreachable!(); };
match find_funding_output(&chan, &funding_transaction) {
Ok(found_funding_txo) => funding_txo = found_funding_txo,
Err(err) => {
- let chan_err = ChannelError::Close(err.to_owned());
+ let chan_err = ChannelError::close(err.to_owned());
let api_err = APIError::APIMisuseError { err: err.to_owned() };
return close_chan!(chan_err, api_err, chan);
},
if short_chan_id != 0 {
let mut forwarding_counterparty = None;
macro_rules! forwarding_channel_not_found {
- () => {
- for forward_info in pending_forwards.drain(..) {
+ ($forward_infos: expr) => {
+ for forward_info in $forward_infos {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
- let logger = WithChannelContext::from(&self.logger, &chan.context, None);
- for forward_info in pending_forwards.drain(..) {
- let queue_fail_htlc_res = match forward_info {
- HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
- prev_user_channel_id, forward_info: PendingHTLCInfo {
- incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
- routing: PendingHTLCRouting::Forward {
- onion_packet, blinded, ..
- }, skimmed_fee_msat, ..
+ let mut draining_pending_forwards = pending_forwards.drain(..);
+ while let Some(forward_info) = draining_pending_forwards.next() {
+ let queue_fail_htlc_res = match forward_info {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
+ incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+ routing: PendingHTLCRouting::Forward {
+ ref onion_packet, blinded, ..
+ }, skimmed_fee_msat, ..
+ },
+ }) => {
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ // Phantom payments are only PendingHTLCRouting::Receive.
+ phantom_shared_secret: None,
+ blinded_failure: blinded.map(|b| b.failure),
+ });
+ let next_blinding_point = blinded.and_then(|b| {
+ let encrypted_tlvs_ss = self.node_signer.ecdh(
+ Recipient::Node, &b.inbound_blinding_point, None
+ ).unwrap().secret_bytes();
+ onion_utils::next_hop_pubkey(
+ &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+ ).ok()
+ });
+
+ // Forward the HTLC over the most appropriate channel with the corresponding peer,
+ // applying non-strict forwarding.
+ // The channel with the least amount of outbound liquidity will be used to maximize the
+ // probability of being able to successfully forward a subsequent HTLC.
+ let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
+ ChannelPhase::Funded(chan) => {
+ let balances = chan.context.get_available_balances(&self.fee_estimator);
+ if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
+ outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
+ chan.context.is_usable() {
+ Some((chan, balances))
+ } else {
+ None
+ }
},
- }) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash));
- log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- user_channel_id: Some(prev_user_channel_id),
- channel_id: prev_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- // Phantom payments are only PendingHTLCRouting::Receive.
- phantom_shared_secret: None,
- blinded_failure: blinded.map(|b| b.failure),
- });
- let next_blinding_point = blinded.and_then(|b| {
- let encrypted_tlvs_ss = self.node_signer.ecdh(
- Recipient::Node, &b.inbound_blinding_point, None
- ).unwrap().secret_bytes();
- onion_utils::next_hop_pubkey(
- &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
- ).ok()
- });
- if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
- payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
- &&logger)
- {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+ _ => None,
+ }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
+ let optimal_channel = match maybe_optimal_channel {
+ Some(chan) => chan,
+ None => {
+ // Fall back to the specified channel to return an appropriate error.
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ chan
} else {
- panic!("Stated return value requirements in send_htlc() were not met");
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
+ }
+ };
+
+ let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
+ let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
+ "specified"
+ } else {
+ "alternate"
+ };
+ log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
+ prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
+ if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
+ payment_hash, outgoing_cltv_value, htlc_source.clone(),
+ onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+ &&logger)
+ {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
));
- continue;
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
- None
- },
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ }
+ None
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
- },
- HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
let res = chan.queue_fail_malformed_htlc(
htlc_id, failure_code, sha256_of_onion, &&logger
);
Some((res, htlc_id))
- },
- };
- if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
- if let Err(e) = queue_fail_htlc_res {
- if let ChannelError::Ignore(msg) = e {
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ };
+ if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+ if let Err(e) = queue_fail_htlc_res {
+ if let ChannelError::Ignore(msg) = e {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
+ } else {
+ panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK; if we can't fail it back, the
+ // channel is on the chain and sending the HTLC-Timeout is their problem.
+ continue;
}
}
- } else {
- forwarding_channel_not_found!();
- continue;
}
} else {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
- shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.channel_id;
- if let Err((pk, err)) = self.claim_funds_from_hop(
+ self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
}
- ) {
- if let msgs::ErrorAction::IgnoreError = err.err.action {
- // We got a temporary failure updating monitor, but will claim the
- // HTLC when the monitor updating is restored (or on chain).
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
- log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
- } else { errs.push((pk, err)); }
- }
+ );
}
}
if !valid_mpp {
}
}
- fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
- prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
- -> Result<(), (PublicKey, MsgHandleErrInternal)> {
+ fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+ &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+ completion_action: ComplFunc,
+ ) {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
// If we haven't yet run background events assume we're still deserializing and shouldn't
let action = if let Some(action) = completion_action(None, true) {
action
} else {
- return Ok(());
+ return;
};
mem::drop(peer_state_lock);
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
- return Ok(());
+ return;
};
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
let mut peer_state = peer_state_mtx.lock().unwrap();
}
}
}
- return Ok(());
+ return;
}
}
}
// generally always allowed to be duplicative (and it's specifically noted in
// `PaymentForwarded`).
self.handle_monitor_update_completion_actions(completion_action(None, false));
- Ok(())
}
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+ self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
})
}
});
- if let Err((pk, err)) = res {
- let result: Result<(), _> = Err(err);
- let _ = handle_error!(self, result, pk);
- }
},
}
}
log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
remaining_in_flight);
- if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
+ if !channel.is_awaiting_monitor_update() || remaining_in_flight != 0 {
return;
}
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel);
},
Some(mut phase) => {
let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
- let err = ChannelError::Close(err_msg);
+ let err = ChannelError::close(err_msg);
return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
// `update_maps_on_chan_removal`), we'll remove the existing channel
// from `outpoint_to_peer`. Thus, we must first unset the funding outpoint
// on the channel.
- let err = ChannelError::Close($err.to_owned());
+ let err = ChannelError::close($err.to_owned());
chan.unset_funding_info(msg.temporary_channel_id);
return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
} } }
} else { unreachable!(); }
Ok(())
} else {
- let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+ let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
// We weren't able to watch the channel to begin with, so no
// updates should be made on it. Previously, full_stack_target
// found an (unreachable) panic when the monitor update contained
Ok(())
} else {
- try_chan_phase_entry!(self, Err(ChannelError::Close(
+ try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
}
},
(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
} else { (tx, None, shutdown_result) }
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
}
},
}
try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
next_user_channel_id = chan.context.get_user_id();
res
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if (msg.failure_code & 0x8000) == 0 {
- let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+ let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry);
}
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
Ok(())
}
Ok(())
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
}
},
}
htlcs_to_fail
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
}
},
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
}
},
update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
});
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
}
},
}
}
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
}
},
}
need_lnd_workaround
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
}
},
let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
reason
} else {
- ClosureReason::HolderForceClosed
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
};
failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
- /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
- /// not have an expiration unless otherwise set on the builder.
+ /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's
+ /// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire.
///
/// # Privacy
///
- /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
- /// However, if one is not found, uses a one-hop [`BlindedPath`] with
- /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
- /// the node must be announced, otherwise, there is no way to find a path to the introduction in
- /// order to send the [`InvoiceRequest`].
+ /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the offer based on the given
+ /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+ /// privacy implications as well as those of the parameterized [`Router`], which implements
+ /// [`MessageRouter`].
///
/// Also, uses a derived signing pubkey in the offer for recipient privacy.
///
///
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
- pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> {
+ pub fn create_offer_builder(
+ &$self, absolute_expiry: Option<Duration>
+ ) -> Result<$builder, Bolt12SemanticError> {
let node_id = $self.get_our_node_id();
let expanded_key = &$self.inbound_payment_key;
let entropy = &*$self.entropy_source;
let secp_ctx = &$self.secp_ctx;
- let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let path = $self.create_blinded_path_using_absolute_expiry(absolute_expiry)
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = OfferBuilder::deriving_signing_pubkey(
node_id, expanded_key, entropy, secp_ctx
)
.chain_hash($self.chain_hash)
.path(path);
+ let builder = match absolute_expiry {
+ None => builder,
+ Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
+ };
+
Ok(builder.into())
}
} }
///
/// # Privacy
///
- /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
- /// However, if one is not found, uses a one-hop [`BlindedPath`] with
- /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
- /// the node must be announced, otherwise, there is no way to find a path to the introduction in
- /// order to send the [`Bolt12Invoice`].
+ /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the refund based on the given
+ /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+ /// privacy implications as well as those of the parameterized [`Router`], which implements
+ /// [`MessageRouter`].
///
/// Also, uses a derived payer id in the refund for payer privacy.
///
let entropy = &*$self.entropy_source;
let secp_ctx = &$self.secp_ctx;
- let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let path = $self.create_blinded_path_using_absolute_expiry(Some(absolute_expiry))
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_payer_id(
node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
)?
///
/// # Privacy
///
- /// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`]
- /// as the introduction node and a derived payer id for payer privacy. As such, currently, the
- /// node must be announced. Otherwise, there is no way to find a path to the introduction node
- /// in order to send the [`Bolt12Invoice`].
+ /// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
+ /// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see the
+ /// docs of the parameterized [`Router`], which implements [`MessageRouter`].
///
/// # Limitations
///
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
+ /// Creates a blinded path by delegating to [`MessageRouter`] based on the path's intended
+ /// lifetime.
+ ///
+ /// The path is compact if it is short-lived and non-compact if it is long-lived, as determined
+ /// by the given `absolute_expiry` in seconds since the Unix epoch. See
+ /// [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`].
+ fn create_blinded_path_using_absolute_expiry(
+ &self, absolute_expiry: Option<Duration>
+ ) -> Result<BlindedPath, ()> {
+ let now = self.duration_since_epoch();
+ let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
+
+ if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
+ self.create_compact_blinded_path()
+ } else {
+ self.create_blinded_path()
+ }
+ }
+
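+ /// Returns the current time as a [`Duration`] since the Unix epoch: the system clock when
+ /// built with the `std` feature, otherwise the highest block timestamp we've seen.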
+ pub(super) fn duration_since_epoch(&self) -> Duration {
+ #[cfg(not(feature = "std"))]
+ let now = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
+ #[cfg(feature = "std")]
+ let now = std::time::SystemTime::now()
+ .duration_since(std::time::SystemTime::UNIX_EPOCH)
+ .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+
+ now
+ }
+
/// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
///
/// Errors if the `MessageRouter` errors or returns an empty `Vec`.
let peers = self.per_peer_state.read().unwrap()
.iter()
.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+ .filter(|(_, peer)| peer.is_connected)
+ .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
+ .map(|(node_id, _)| *node_id)
+ .collect::<Vec<_>>();
+
+ self.router
+ .create_blinded_paths(recipient, peers, secp_ctx)
+ .and_then(|paths| paths.into_iter().next().ok_or(()))
+ }
+
+ /// Creates a blinded path by delegating to [`MessageRouter::create_compact_blinded_paths`].
+ ///
+ /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+ fn create_compact_blinded_path(&self) -> Result<BlindedPath, ()> {
+ let recipient = self.get_our_node_id();
+ let secp_ctx = &self.secp_ctx;
+
+ let peers = self.per_peer_state.read().unwrap()
+ .iter()
+ .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+ .filter(|(_, peer)| peer.is_connected)
.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
.map(|(node_id, peer)| ForwardNode {
node_id: *node_id,
.collect::<Vec<_>>();
self.router
- .create_blinded_paths(recipient, peers, secp_ctx)
+ .create_compact_blinded_paths(recipient, peers, secp_ctx)
.and_then(|paths| paths.into_iter().next().ok_or(()))
}
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
+ let mut min_anchor_feerate = None;
+ let mut min_non_anchor_feerate = None;
+ if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
+ // If we're past the startup phase, update our feerate cache
+ let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
+ if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
+ last_days_feerates.pop_front();
+ }
+ let anchor_feerate = self.fee_estimator
+ .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
+ let non_anchor_feerate = self.fee_estimator
+ .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
+ last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
+ if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
+ min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
+ min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
+ }
+ }
+
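+ // While notifying each channel of the new block below, also check its feerate against the
+ // lowest estimate we've seen over the tracking window and force-close it if it is stale.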
+ self.do_chain_event(Some(height), |channel| {
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+ if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ if let Some(feerate) = min_anchor_feerate {
+ channel.check_for_stale_feerate(&logger, feerate)?;
+ }
+ } else {
+ if let Some(feerate) = min_non_anchor_feerate {
+ channel.check_for_stale_feerate(&logger, feerate)?;
+ }
+ }
+ channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
+ });
macro_rules! max_time {
($timestamp: expr) => {
};
match response {
- Ok(invoice) => return responder.respond(OffersMessage::Invoice(invoice)),
- Err(error) => return responder.respond(OffersMessage::InvoiceError(error.into())),
+ Ok(invoice) => responder.respond(OffersMessage::Invoice(invoice)),
+ Err(error) => responder.respond(OffersMessage::InvoiceError(error.into())),
}
},
OffersMessage::Invoice(invoice) => {
- let response = invoice
- .verify(expanded_key, secp_ctx)
- .map_err(|()| InvoiceError::from_string("Unrecognized invoice".to_owned()))
- .and_then(|payment_id| {
+ let result = match invoice.verify(expanded_key, secp_ctx) {
+ Ok(payment_id) => {
let features = self.bolt12_invoice_features();
if invoice.invoice_features().requires_unknown_bits_from(&features) {
Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
+ } else if self.default_configuration.manually_handle_bolt12_invoices {
+ let event = Event::InvoiceReceived { payment_id, invoice, responder };
+ self.pending_events.lock().unwrap().push_back((event, None));
+ return ResponseInstruction::NoResponse;
} else {
- self.send_payment_for_bolt12_invoice(&invoice, payment_id)
+ self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id)
.map_err(|e| {
log_trace!(self.logger, "Failed paying invoice: {:?}", e);
InvoiceError::from_string(format!("{:?}", e))
})
}
- });
+ },
+ Err(()) => Err(InvoiceError::from_string("Unrecognized invoice".to_owned())),
+ };
- match (responder, response) {
- (Some(responder), Err(e)) => responder.respond(OffersMessage::InvoiceError(e)),
- (None, Err(_)) => {
- log_trace!(
- self.logger,
- "A response was generated, but there is no reply_path specified for sending the response."
- );
- return ResponseInstruction::NoResponse;
- }
- _ => return ResponseInstruction::NoResponse,
+ match result {
+ Ok(()) => ResponseInstruction::NoResponse,
+ Err(e) => match responder {
+ Some(responder) => responder.respond(OffersMessage::InvoiceError(e)),
+ None => {
+ log_trace!(self.logger, "No reply path for sending invoice error: {:?}", e);
+ ResponseInstruction::NoResponse
+ },
+ },
}
},
OffersMessage::InvoiceError(invoice_error) => {
log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
- return ResponseInstruction::NoResponse;
+ ResponseInstruction::NoResponse
},
}
}
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
-impl_writeable_tlv_based!(CounterpartyForwardingInfo, {
- (2, fee_base_msat, required),
- (4, fee_proportional_millionths, required),
- (6, cltv_expiry_delta, required),
-});
-
-impl_writeable_tlv_based!(ChannelCounterparty, {
- (2, node_id, required),
- (4, features, required),
- (6, unspendable_punishment_reserve, required),
- (8, forwarding_info, option),
- (9, outbound_htlc_minimum_msat, option),
- (11, outbound_htlc_maximum_msat, option),
-});
-
-impl Writeable for ChannelDetails {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
- // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
- let user_channel_id_low = self.user_channel_id as u64;
- let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64);
- write_tlv_fields!(writer, {
- (1, self.inbound_scid_alias, option),
- (2, self.channel_id, required),
- (3, self.channel_type, option),
- (4, self.counterparty, required),
- (5, self.outbound_scid_alias, option),
- (6, self.funding_txo, option),
- (7, self.config, option),
- (8, self.short_channel_id, option),
- (9, self.confirmations, option),
- (10, self.channel_value_satoshis, required),
- (12, self.unspendable_punishment_reserve, option),
- (14, user_channel_id_low, required),
- (16, self.balance_msat, required),
- (18, self.outbound_capacity_msat, required),
- (19, self.next_outbound_htlc_limit_msat, required),
- (20, self.inbound_capacity_msat, required),
- (21, self.next_outbound_htlc_minimum_msat, required),
- (22, self.confirmations_required, option),
- (24, self.force_close_spend_delay, option),
- (26, self.is_outbound, required),
- (28, self.is_channel_ready, required),
- (30, self.is_usable, required),
- (32, self.is_public, required),
- (33, self.inbound_htlc_minimum_msat, option),
- (35, self.inbound_htlc_maximum_msat, option),
- (37, user_channel_id_high_opt, option),
- (39, self.feerate_sat_per_1000_weight, option),
- (41, self.channel_shutdown_state, option),
- (43, self.pending_inbound_htlcs, optional_vec),
- (45, self.pending_outbound_htlcs, optional_vec),
- });
- Ok(())
- }
-}
-
-impl Readable for ChannelDetails {
- fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- _init_and_read_len_prefixed_tlv_fields!(reader, {
- (1, inbound_scid_alias, option),
- (2, channel_id, required),
- (3, channel_type, option),
- (4, counterparty, required),
- (5, outbound_scid_alias, option),
- (6, funding_txo, option),
- (7, config, option),
- (8, short_channel_id, option),
- (9, confirmations, option),
- (10, channel_value_satoshis, required),
- (12, unspendable_punishment_reserve, option),
- (14, user_channel_id_low, required),
- (16, balance_msat, required),
- (18, outbound_capacity_msat, required),
- // Note that by the time we get past the required read above, outbound_capacity_msat will be
- // filled in, so we can safely unwrap it here.
- (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
- (20, inbound_capacity_msat, required),
- (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
- (22, confirmations_required, option),
- (24, force_close_spend_delay, option),
- (26, is_outbound, required),
- (28, is_channel_ready, required),
- (30, is_usable, required),
- (32, is_public, required),
- (33, inbound_htlc_minimum_msat, option),
- (35, inbound_htlc_maximum_msat, option),
- (37, user_channel_id_high_opt, option),
- (39, feerate_sat_per_1000_weight, option),
- (41, channel_shutdown_state, option),
- (43, pending_inbound_htlcs, optional_vec),
- (45, pending_outbound_htlcs, optional_vec),
- });
-
- // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
- // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
- let user_channel_id_low: u64 = user_channel_id_low.0.unwrap();
- let user_channel_id = user_channel_id_low as u128 +
- ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
-
- Ok(Self {
- inbound_scid_alias,
- channel_id: channel_id.0.unwrap(),
- channel_type,
- counterparty: counterparty.0.unwrap(),
- outbound_scid_alias,
- funding_txo,
- config,
- short_channel_id,
- channel_value_satoshis: channel_value_satoshis.0.unwrap(),
- unspendable_punishment_reserve,
- user_channel_id,
- balance_msat: balance_msat.0.unwrap(),
- outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
- next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
- next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
- inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
- confirmations_required,
- confirmations,
- force_close_spend_delay,
- is_outbound: is_outbound.0.unwrap(),
- is_channel_ready: is_channel_ready.0.unwrap(),
- is_usable: is_usable.0.unwrap(),
- is_public: is_public.0.unwrap(),
- inbound_htlc_minimum_msat,
- inbound_htlc_maximum_msat,
- feerate_sat_per_1000_weight,
- channel_shutdown_state,
- pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
- pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
- })
- }
-}
-
impl_writeable_tlv_based!(PhantomRouteHints, {
(2, channels, required_vec),
(4, phantom_scid, required),
}
}
-impl_writeable_tlv_based_enum!(ChannelShutdownState,
- (0, NotShuttingDown) => {},
- (2, ShutdownInitiated) => {},
- (4, ResolvingHTLCs) => {},
- (6, NegotiatingClosingFee) => {},
- (8, ShutdownComplete) => {}, ;
-);
-
/// Arguments for the creation of a ChannelManager that are not deserialized.
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
node_signer: args.node_signer,
signer_provider: args.signer_provider,
+ last_days_feerates: Mutex::new(VecDeque::new()),
+
logger: args.logger,
default_configuration: args.default_config,
};
nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
// Confirm that the channel_update was not sent immediately to node[1] but was cached.
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-
- nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
{
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
let channel_id = ChannelId::from_bytes([4; 32]);
let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
let intercept_id = InterceptId([0; 32]);
+ let error_message = "Channel force-closed";
// Test the API functions.
check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
- check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
+ check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
- check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
+ check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
// Dummy values
let channel_id = ChannelId::from_bytes([4; 32]);
+ let error_message = "Channel force-closed";
// Test the API functions.
check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
- check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+ check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
- check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+ check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
anchors_config.manually_accept_inbound_channels = true;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let error_message = "Channel force-closed";
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
}
_ => panic!("Unexpected event"),
}
let user_config = test_default_channel_config();
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
+ let error_message = "Channel force-closed";
// Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
{
let txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);