use bitcoin::secp256k1;
use chain;
-use chain::{Confirm, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
use ln::features::{InitFeatures, NodeFeatures};
-use routing::router::{Route, RouteHop};
+use routing::router::{Payee, Route, RouteHop, RoutePath, RouteParameters};
use ln::msgs;
use ln::msgs::NetAddress;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use util::{byte_utils, events};
-use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
+use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
use util::errors::APIError;
+use io;
use prelude::*;
use core::{cmp, mem};
use core::cell::RefCell;
-use std::io::{Cursor, Read};
-use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use io::{Cursor, Read};
+use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;
#[cfg(any(test, feature = "allow_wallclock_use"))]
use std::time::Instant;
use core::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
payment_data: msgs::FinalOnionHopData,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
},
+ ReceiveKeysend {
+ payment_preimage: PaymentPreimage,
+ incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+ },
}
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
}
/// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, PartialEq)]
+#[derive(Clone, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
short_channel_id: u64,
htlc_id: u64,
outpoint: OutPoint,
}
-struct ClaimableHTLC {
- prev_hop: HTLCPreviousHopData,
- value: u64,
+enum OnionPayload {
/// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
/// are part of the same payment.
- payment_data: msgs::FinalOnionHopData,
+ Invoice(msgs::FinalOnionHopData),
+ /// Contains the payer-provided preimage.
+ Spontaneous(PaymentPreimage),
+}
+
+struct ClaimableHTLC {
+ prev_hop: HTLCPreviousHopData,
cltv_expiry: u32,
+ value: u64,
+ onion_payload: OnionPayload,
+}
+
+/// A payment identifier used to uniquely identify a payment to LDK.
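+///
+/// A fresh `PaymentId` is normally drawn from the keys manager's secure RNG when a payment is
+/// first sent; see `send_payment_internal` below, which falls back to
+/// `PaymentId(self.keys_manager.get_secure_random_bytes())` when no id is provided.
+///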
+/// (C-not exported) as we just use [u8; 32] directly
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
+pub struct PaymentId(pub [u8; 32]);
+
+impl Writeable for PaymentId {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.0.write(w)
+ }
}
+impl Readable for PaymentId {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let buf: [u8; 32] = Readable::read(r)?;
+ Ok(PaymentId(buf))
+ }
+}
/// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, PartialEq)]
+#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
+#[derive(Clone, PartialEq, Eq)]
pub(crate) enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {
/// Technically we can recalculate this from the route, but we cache it here to avoid
/// doing a double-pass on route when we get a failure back
first_hop_htlc_msat: u64,
+ payment_id: PaymentId,
+ payment_secret: Option<PaymentSecret>,
+ payee: Option<Payee>,
},
}
+#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
+impl core::hash::Hash for HTLCSource {
+ fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
+ match self {
+ HTLCSource::PreviousHopData(prev_hop_data) => {
+ 0u8.hash(hasher);
+ prev_hop_data.hash(hasher);
+ },
+ HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payee } => {
+ 1u8.hash(hasher);
+ path.hash(hasher);
+ session_priv[..].hash(hasher);
+ payment_id.hash(hasher);
+ payment_secret.hash(hasher);
+ first_hop_htlc_msat.hash(hasher);
+ payee.hash(hasher);
+ },
+ }
+ }
+}
#[cfg(test)]
impl HTLCSource {
pub fn dummy() -> Self {
path: Vec::new(),
session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
first_hop_htlc_msat: 0,
+ payment_id: PaymentId([2; 32]),
+ payment_secret: None,
+ payee: None,
}
}
}
}
}
+/// Return value for claim_funds_from_hop
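+/// The `u64` values, where present, carry the claimed HTLC's value in millisatoshis so the
+/// caller can tell how much was actually claimed.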
+enum ClaimFundsFromHop {
+ PrevHopForceClosed,
+ MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+ Success(u64),
+ DuplicateClaim,
+}
+
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
struct MsgHandleErrInternal {
err: msgs::LightningError,
+ chan_id: Option<([u8; 32], u64)>, // If Some, a channel of ours has been closed
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
},
},
},
+ chan_id: None,
shutdown_finish: None,
}
}
err,
action: msgs::ErrorAction::IgnoreError,
},
+ chan_id: None,
shutdown_finish: None,
}
}
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
- Self { err, shutdown_finish: None }
+ Self { err, chan_id: None, shutdown_finish: None }
}
#[inline]
- fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+ fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
Self {
err: LightningError {
err: err.clone(),
},
},
},
+ chan_id: Some((channel_id, user_channel_id)),
shutdown_finish: Some((shutdown_res, channel_update)),
}
}
fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
Self {
err: match err {
+ ChannelError::Warn(msg) => LightningError {
+ err: msg,
+ action: msgs::ErrorAction::IgnoreError,
+ },
ChannelError::Ignore(msg) => LightningError {
err: msg,
action: msgs::ErrorAction::IgnoreError,
},
},
},
+ chan_id: None,
shutdown_finish: None,
}
}
///
/// For users who don't want to bother doing their own payment preimage storage, we also store that
/// here.
+///
+/// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data
+/// and instead encoding it in the payment secret.
struct PendingInboundPayment {
/// The payment secret that the sender must use for us to accept this payment
payment_secret: PaymentSecret,
min_value_msat: Option<u64>,
}
+/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
+/// and later, also stores information for retrying the payment.
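+///
+/// A payment typically starts out as `Retryable` (`Legacy` only appears when deserializing data
+/// written by a version prior to 0.0.102) and is moved to `Fulfilled` once the recipient claims
+/// it; the entry is only dropped once all of the payment's pending HTLCs have been resolved.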
+pub(crate) enum PendingOutboundPayment {
+ Legacy {
+ session_privs: HashSet<[u8; 32]>,
+ },
+ Retryable {
+ session_privs: HashSet<[u8; 32]>,
+ payment_hash: PaymentHash,
+ payment_secret: Option<PaymentSecret>,
+ pending_amt_msat: u64,
+ /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+.
+ pending_fee_msat: Option<u64>,
+ /// The total payment amount across all paths, used to verify that a retry is not overpaying.
+ total_msat: u64,
+ /// Our best known block height at the time this payment was initiated.
+ starting_block_height: u32,
+ },
+ /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
+ /// been resolved. This ensures we don't look up pending payments in ChannelMonitors on restart
+ /// and add a pending payment that was already fulfilled.
+ Fulfilled {
+ session_privs: HashSet<[u8; 32]>,
+ payment_hash: Option<PaymentHash>,
+ },
+}
+
+impl PendingOutboundPayment {
+ fn is_retryable(&self) -> bool {
+ match self {
+ PendingOutboundPayment::Retryable { .. } => true,
+ _ => false,
+ }
+ }
+ fn is_fulfilled(&self) -> bool {
+ match self {
+ PendingOutboundPayment::Fulfilled { .. } => true,
+ _ => false,
+ }
+ }
+ fn get_pending_fee_msat(&self) -> Option<u64> {
+ match self {
+ PendingOutboundPayment::Retryable { pending_fee_msat, .. } => pending_fee_msat.clone(),
+ _ => None,
+ }
+ }
+
+ fn payment_hash(&self) -> Option<PaymentHash> {
+ match self {
+ PendingOutboundPayment::Legacy { .. } => None,
+ PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash),
+ PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash,
+ }
+ }
+
+ fn mark_fulfilled(&mut self) {
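+ // Move the session_privs set out of whichever variant we currently are (leaving an empty
+ // set behind), then replace ourselves with `Fulfilled` carrying that set and our payment hash.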
+ let mut session_privs = HashSet::new();
+ core::mem::swap(&mut session_privs, match self {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } |
+ PendingOutboundPayment::Fulfilled { session_privs, .. }
+ => session_privs
+ });
+ let payment_hash = self.payment_hash();
+ *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash };
+ }
+
+ /// panics if path is None and !self.is_fulfilled
+ fn remove(&mut self, session_priv: &[u8; 32], path: Option<&Vec<RouteHop>>) -> bool {
+ let remove_res = match self {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } |
+ PendingOutboundPayment::Fulfilled { session_privs, .. } => {
+ session_privs.remove(session_priv)
+ }
+ };
+ if remove_res {
+ if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
+ let path = path.expect("Fulfilling a payment should always come with a path");
+ let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+ *pending_amt_msat -= path_last_hop.fee_msat;
+ if let Some(fee_msat) = pending_fee_msat.as_mut() {
+ *fee_msat -= path.get_path_fees();
+ }
+ }
+ }
+ remove_res
+ }
+
+ fn insert(&mut self, session_priv: [u8; 32], path: &Vec<RouteHop>) -> bool {
+ let insert_res = match self {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ session_privs.insert(session_priv)
+ }
+ PendingOutboundPayment::Fulfilled { .. } => false
+ };
+ if insert_res {
+ if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
+ let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+ *pending_amt_msat += path_last_hop.fee_msat;
+ if let Some(fee_msat) = pending_fee_msat.as_mut() {
+ *fee_msat += path.get_path_fees();
+ }
+ }
+ }
+ insert_res
+ }
+
+ fn remaining_parts(&self) -> usize {
+ match self {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } |
+ PendingOutboundPayment::Fulfilled { session_privs, .. } => {
+ session_privs.len()
+ }
+ }
+ }
+}
+
/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// Locked *after* channel_state.
pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
- /// The session_priv bytes of outbound payments which are pending resolution.
+ /// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
/// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
/// (if the channel has been force-closed), however we track them here to prevent duplicative
- /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
+ /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
/// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
/// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
/// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
/// after reloading from disk while replaying blocks against ChannelMonitors.
///
+ /// See `PendingOutboundPayment` documentation for more info.
+ ///
/// Locked *after* channel_state.
- pending_outbound_payments: Mutex<HashSet<[u8; 32]>>,
+ pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
our_network_key: SecretKey,
our_network_pubkey: PublicKey,
/// Because adding or removing an entry is rare, we usually take an outer read lock and then
/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
/// new channel.
+ ///
+ /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
pending_events: Mutex<Vec<events::Event>>,
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+/// Information needed for constructing an invoice route hint for this channel.
+#[derive(Clone, Debug, PartialEq)]
+pub struct CounterpartyForwardingInfo {
+ /// Base routing fee in millisatoshis.
+ pub fee_base_msat: u32,
+ /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
+ pub fee_proportional_millionths: u32,
+ /// The minimum difference in cltv_expiry between an incoming HTLC and its outgoing counterpart,
+ /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
+ /// `cltv_expiry_delta` for more details.
+ pub cltv_expiry_delta: u16,
+}
+
/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
/// to better separate parameters.
#[derive(Clone, Debug, PartialEq)]
///
/// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
pub unspendable_punishment_reserve: Option<u64>,
- /// The user_id passed in to create_channel, or 0 if the channel was inbound.
- pub user_id: u64,
+ /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound.
+ pub user_channel_id: u64,
/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new outbound HTLCs). This further does not include any pending
/// as they will result in over-/re-payment. These HTLCs were all either successfully sent (in
/// the case of Ok(())) or will be sent once channel_monitor_updated is called on the next-hop channel
/// with the latest update_id.
- PartialFailure(Vec<Result<(), APIError>>),
+ PartialFailure {
+ /// The errors themselves, in the same order as the route's paths.
+ results: Vec<Result<(), APIError>>,
+ /// If some paths failed without irrevocably committing to the new HTLC(s), this will
+ /// contain a [`RouteParameters`] object which can be used to calculate a new route that
+ /// will pay all remaining unpaid balance.
+ failed_paths_retry: Option<RouteParameters>,
+ /// The payment id for the payment, which is now at least partially pending.
+ payment_id: PaymentId,
+ },
}
macro_rules! handle_error {
($self: ident, $internal: expr, $counterparty_node_id: expr) => {
match $internal {
Ok(msg) => Ok(msg),
- Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+ Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
#[cfg(debug_assertions)]
{
// In testing, ensure there are no deadlocks where the lock is already held upon
// entering the macro.
assert!($self.channel_state.try_lock().is_ok());
+ assert!($self.pending_events.try_lock().is_ok());
}
let mut msg_events = Vec::with_capacity(2);
msg: update
});
}
+ if let Some((channel_id, user_channel_id)) = chan_id {
+ $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
+ channel_id, user_channel_id,
+ reason: ClosureReason::ProcessingError { err: err.err.clone() }
+ });
+ }
}
log_error!($self.logger, "{}", err.err);
macro_rules! convert_chan_err {
($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
match $err {
+ ChannelError::Warn(msg) => {
+ //TODO: Once warning messages are merged, we should send a `warning` message to our
+ //peer here.
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+ },
ChannelError::Ignore(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
},
$short_to_id.remove(&short_id);
}
let shutdown_res = $channel.force_shutdown(true);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+ shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
},
ChannelError::CloseDelayBroadcast(msg) => {
log_error!($self.logger, "Channel {} needs to be shut down but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
$short_to_id.remove(&short_id);
}
let shutdown_res = $channel.force_shutdown(false);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+ shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
}
}
}
}
}
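+// Removes the channel entry from `$channel_state.by_id` via the passed map `Entry` and drops its
+// `short_to_id` mapping (if any), returning the removed `Channel`.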
+macro_rules! remove_channel {
+ ($channel_state: expr, $entry: expr) => {
+ {
+ let channel = $entry.remove_entry().1;
+ if let Some(short_id) = channel.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
+ }
+ channel
+ }
+ }
+}
+
macro_rules! handle_monitor_err {
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
};
- ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
+ ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
match $err {
ChannelMonitorUpdateErr::PermanentFailure => {
log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
// splitting hairs we'd prefer to claim payments that were to us, but we haven't
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
$chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
(res, true)
},
ChannelMonitorUpdateErr::TemporaryFailure => {
- log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
+ log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
log_bytes!($chan_id[..]),
if $resend_commitment && $resend_raa {
match $action_type {
else if $resend_raa { "RAA" }
else { "nothing" },
(&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
- (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
+ (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
+ (&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
if !$resend_commitment {
debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
}
if !$resend_raa {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
}
- $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
},
}
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
- let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+ let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
if drop {
$entry.remove_entry();
}
res
} };
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
+ }
}
macro_rules! return_monitor_err {
pending_msg_events: Vec::new(),
}),
pending_inbound_payments: Mutex::new(HashMap::new()),
- pending_outbound_payments: Mutex::new(HashSet::new()),
+ pending_outbound_payments: Mutex::new(HashMap::new()),
our_network_key: keys_manager.get_node_secret(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
/// Creates a new outbound channel to the given remote node and with the given value.
///
- /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow
- /// tracking of which events correspond with which create_channel call. Note that the
- /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for
- /// user_id here. user_id has no meaning inside of LDK, it is simply copied to events and
- /// otherwise ignored.
+ /// `user_channel_id` will be provided back in
+ /// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
+ /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0
+ /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here.
+ /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise
+ /// ignored.
///
- /// If successful, will generate a SendOpenChannel message event, so you should probably poll
- /// PeerManager::process_events afterwards.
+ /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
+ /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
///
- /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
- /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
- pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
+ /// Note that we do not check if you are currently connected to the given peer. If no
+ /// connection is available, the outbound `open_channel` message may fail to send, resulting in
+ /// the channel eventually being silently forgotten (dropped on reload).
+ ///
+ /// Returns the new Channel's temporary `channel_id`. This ID will appear as
+ /// [`Event::FundingGenerationReady::temporary_channel_id`] and in
+ /// [`ChannelDetails::channel_id`] until after
+ /// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for
+ /// one derived from the funding transaction's TXID. If the counterparty rejects the channel
+ /// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`].
+ ///
+ /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
+ /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
+ /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
+ pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
if channel_value_satoshis < 1000 {
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
- let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
- let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
+ let channel = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ match per_peer_state.get(&their_network_key) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
+ Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features,
+ channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height())?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
+ }
+ };
let res = channel.get_open_channel(self.genesis_hash.clone());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
debug_assert!(&self.total_consistency_lock.try_write().is_err());
+ let temporary_channel_id = channel.channel_id();
let mut channel_state = self.channel_state.lock().unwrap();
- match channel_state.by_id.entry(channel.channel_id()) {
+ match channel_state.by_id.entry(temporary_channel_id) {
hash_map::Entry::Occupied(_) => {
if cfg!(feature = "fuzztarget") {
return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
node_id: their_network_key,
msg: res,
});
- Ok(())
+ Ok(temporary_channel_id)
}
fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
unspendable_punishment_reserve: to_self_reserve_satoshis,
inbound_capacity_msat,
outbound_capacity_msat,
- user_id: channel.get_user_id(),
+ user_channel_id: channel.get_user_id(),
confirmations_required: channel.minimum_depth(),
force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
is_outbound: channel.is_outbound(),
self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
- /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
- /// will be accepted on the given channel, and after additional timeout/the closing of all
- /// pending HTLCs, the channel will be closed on chain.
- ///
- /// May generate a SendShutdown message event on success, which should be relayed.
- pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ /// Helper function that issues the channel close events
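+ /// Pushes a `DiscardFunding` event if the channel's funding transaction was never broadcast,
+ /// followed by a `ChannelClosed` event carrying the given `closure_reason`.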
+ fn issue_channel_close_events(&self, channel: &Channel<Signer>, closure_reason: ClosureReason) {
+ let mut pending_events_lock = self.pending_events.lock().unwrap();
+ match channel.unbroadcasted_funding() {
+ Some(transaction) => {
+ pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
+ },
+ None => {},
+ }
+ pending_events_lock.push(events::Event::ChannelClosed {
+ channel_id: channel.channel_id(),
+ user_channel_id: channel.get_user_id(),
+ reason: closure_reason
+ });
+ }
+
+ fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let (mut failed_htlcs, chan_option) = {
+ let counterparty_node_id;
+ let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
+ counterparty_node_id = chan_entry.get().get_counterparty_node_id();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ };
+ failed_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ let (result, is_permanent) =
+ handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key());
+ if is_permanent {
+ remove_channel!(channel_state, chan_entry);
+ break result;
+ }
+ }
+ }
+
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: chan_entry.get().get_counterparty_node_id(),
+ node_id: counterparty_node_id,
msg: shutdown_msg
});
+
if chan_entry.get().is_shutdown() {
- if let Some(short_id) = chan_entry.get().get_short_channel_id() {
- channel_state.short_to_id.remove(&short_id);
+ let channel = remove_channel!(channel_state, chan_entry);
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
}
- (failed_htlcs, Some(chan_entry.remove_entry().1))
- } else { (failed_htlcs, None) }
+ self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+ }
+ break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
}
};
+
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- let chan_update = if let Some(chan) = chan_option {
- self.get_channel_update_for_broadcast(&chan).ok()
- } else { None };
-
- if let Some(update) = chan_update {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
+ let _ = handle_error!(self, result, counterparty_node_id);
Ok(())
}
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// * If we are the channel initiator, we will pay between our [`Background`] and
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+ /// estimate.
+ /// * If our counterparty is the channel initiator, we will require a channel closing
+ /// transaction feerate of at least our [`Background`] feerate or the feerate which
+ /// would appear on a force-closure transaction, whichever is lower. We will allow our
+ /// counterparty to pay as much fee as they'd like, however.
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, None)
+ }
+
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// `target_feerate_sats_per_1000_weight` has different meanings depending on whether we initiated
+ /// the channel being closed or not:
+ /// * If we are the channel initiator, we will pay at least this feerate on the closing
+ /// transaction. The upper-bound is set by
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+ /// estimate (or `target_feerate_sats_per_1000_weight`, if it is greater).
+ /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure
+ /// transaction feerate below `target_feerate_sats_per_1000_weight` (or the feerate which
+ /// will appear on a force-closure transaction, whichever is lower).
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+ }
+
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
}
}
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
+ /// `peer_node_id` should be set when we receive a message from a peer, but not set when the
+ /// user closes, which will be re-exposed as the `ChannelClosed` reason.
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let Some(short_id) = chan.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
+ if peer_node_id.is_some() {
+ if let Some(peer_msg) = peer_msg {
+ self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
+ }
+ } else {
+ self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
+ }
chan.remove_entry().1
} else {
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- match self.force_close_channel_with_peer(channel_id, None) {
+ match self.force_close_channel_with_peer(channel_id, None, None) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
- let (next_hop_data, next_hop_hmac) = {
- match msgs::OnionHopData::read(&mut chacha_stream) {
+ let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = {
+ match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
Err(err) => {
let error_code = match err {
msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
};
let pending_forward_info = if next_hop_hmac == [0; 32] {
- #[cfg(test)]
- {
- // In tests, make sure that the initial onion pcket data is, at least, non-0.
- // We could do some fancy randomness test here, but, ehh, whatever.
- // This checks for the issue where you can calculate the path length given the
- // onion data as all the path entries that the originator sent will be here
- // as-is (and were originally 0s).
- // Of course reverse path calculation is still pretty easy given naive routing
- // algorithms, but this fixes the most-obvious case.
- let mut next_bytes = [0; 32];
- chacha_stream.read_exact(&mut next_bytes).unwrap();
- assert_ne!(next_bytes[..], [0; 32][..]);
- chacha_stream.read_exact(&mut next_bytes).unwrap();
- assert_ne!(next_bytes[..], [0; 32][..]);
- }
-
- // OUR PAYMENT!
- // final_expiry_too_soon
- // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least
- // HTLC_FAIL_BACK_BUFFER blocks to go.
- // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
- // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational).
- if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
- return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
- }
- // final_incorrect_htlc_amount
- if next_hop_data.amt_to_forward > msg.amount_msat {
- return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
- }
- // final_incorrect_cltv_expiry
- if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
- return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
- }
-
- let payment_data = match next_hop_data.format {
- msgs::OnionHopDataFormat::Legacy { .. } => None,
- msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
- msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
- };
+ #[cfg(test)]
+ {
+ // In tests, make sure that the initial onion packet data is, at least, non-0.
+ // We could do some fancy randomness test here, but, ehh, whatever.
+ // This checks for the issue where you can calculate the path length given the
+ // onion data as all the path entries that the originator sent will be here
+ // as-is (and were originally 0s).
+ // Of course reverse path calculation is still pretty easy given naive routing
+ // algorithms, but this fixes the most-obvious case.
+ let mut next_bytes = [0; 32];
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
+ }
- if payment_data.is_none() {
- return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
- }
+ // OUR PAYMENT!
+ // final_expiry_too_soon
+ // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
+ // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+ // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
+ // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
+ // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
+ if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+ return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+ }
+ // final_incorrect_htlc_amount
+ if next_hop_data.amt_to_forward > msg.amount_msat {
+ return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
+ }
+ // final_incorrect_cltv_expiry
+ if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
+ return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
+ }
- // Note that we could obviously respond immediately with an update_fulfill_htlc
- // message, however that would leak that we are the recipient of this payment, so
- // instead we stay symmetric with the forwarding case, only responding (after a
- // delay) once they've send us a commitment_signed!
+ let routing = match next_hop_data.format {
+ msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]),
+ msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
+ msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
+ if payment_data.is_some() && keysend_preimage.is_some() {
+ return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]);
+ } else if let Some(data) = payment_data {
+ PendingHTLCRouting::Receive {
+ payment_data: data,
+ incoming_cltv_expiry: msg.cltv_expiry,
+ }
+ } else if let Some(payment_preimage) = keysend_preimage {
+ // We need to check that the sender knows the keysend preimage before processing this
+ // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
+ // could discover the final destination of X, by probing the adjacent nodes on the route
+ // with a keysend payment of identical payment hash to X and observing the processing
+ // time discrepancies due to a hash collision with X.
+ let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+ if hashed_preimage != msg.payment_hash {
+ return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]);
+ }
- PendingHTLCStatus::Forward(PendingHTLCInfo {
- routing: PendingHTLCRouting::Receive {
- payment_data: payment_data.unwrap(),
- incoming_cltv_expiry: msg.cltv_expiry,
- },
- payment_hash: msg.payment_hash.clone(),
- incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
- })
- } else {
- let mut new_packet_data = [0; 20*65];
- let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
- #[cfg(debug_assertions)]
- {
- // Check two things:
- // a) that the behavior of our stream here will return Ok(0) even if the TLV
- // read above emptied out our buffer and the unwrap() wont needlessly panic
- // b) that we didn't somehow magically end up with extra data.
- let mut t = [0; 1];
- debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
- }
- // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
- // fill the onion hop data we'll forward to our next-hop peer.
- chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
+ PendingHTLCRouting::ReceiveKeysend {
+ payment_preimage,
+ incoming_cltv_expiry: msg.cltv_expiry,
+ }
+ } else {
+ return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
+ }
+ },
+ };
- let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+ // Note that we could obviously respond immediately with an update_fulfill_htlc
+ // message, however that would leak that we are the recipient of this payment, so
+ // instead we stay symmetric with the forwarding case, only responding (after a
+ // delay) once they've sent us a commitment_signed!
+
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ routing,
+ payment_hash: msg.payment_hash.clone(),
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+ })
+ } else {
+ let mut new_packet_data = [0; 20*65];
+ let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
+ #[cfg(debug_assertions)]
+ {
+ // Check two things:
+ // a) that the behavior of our stream here will return Ok(0) even if the TLV
+ // read above emptied out our buffer and the unwrap() won't needlessly panic
+ // b) that we didn't somehow magically end up with extra data.
+ let mut t = [0; 1];
+ debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
+ }
+ // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
+ // fill the onion hop data we'll forward to our next-hop peer.
+ chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
- let blinding_factor = {
- let mut sha = Sha256::engine();
- sha.input(&new_pubkey.serialize()[..]);
- sha.input(&shared_secret);
- Sha256::from_engine(sha).into_inner()
- };
+ let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
- let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
- Err(e)
- } else { Ok(new_pubkey) };
+ let blinding_factor = {
+ let mut sha = Sha256::engine();
+ sha.input(&new_pubkey.serialize()[..]);
+ sha.input(&shared_secret);
+ Sha256::from_engine(sha).into_inner()
+ };
- let outgoing_packet = msgs::OnionPacket {
- version: 0,
- public_key,
- hop_data: new_packet_data,
- hmac: next_hop_hmac.clone(),
- };
+ let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+ Err(e)
+ } else { Ok(new_pubkey) };
- let short_channel_id = match next_hop_data.format {
- msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
- msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
- msgs::OnionHopDataFormat::FinalNode { .. } => {
- return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
- },
- };
+ let outgoing_packet = msgs::OnionPacket {
+ version: 0,
+ public_key,
+ hop_data: new_packet_data,
+ hmac: next_hop_hmac.clone(),
+ };
- PendingHTLCStatus::Forward(PendingHTLCInfo {
- routing: PendingHTLCRouting::Forward {
- onion_packet: outgoing_packet,
- short_channel_id,
- },
- payment_hash: msg.payment_hash.clone(),
- incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
- })
+ let short_channel_id = match next_hop_data.format {
+ msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
+ msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
+ msgs::OnionHopDataFormat::FinalNode { .. } => {
+ return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
+ },
};
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ routing: PendingHTLCRouting::Forward {
+ onion_packet: outgoing_packet,
+ short_channel_id,
+ },
+ payment_hash: msg.payment_hash.clone(),
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+ })
+ };
+
channel_state = Some(self.channel_state.lock().unwrap());
if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
// short_channel_id is non-0 in any ::Forward.
if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
- let forwarding_id = match id_option {
- None => { // unknown_next_peer
- return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
- },
- Some(id) => id.clone(),
- };
if let Some((err, code, chan_update)) = loop {
+ let forwarding_id = match id_option {
+ None => { // unknown_next_peer
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ },
+ Some(id) => id.clone(),
+ };
+
let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+ if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+ // Note that the behavior here should be identical to the above block - we
+ // should NOT reveal the existence or non-existence of a private channel if
+ // we don't allow forwards outbound over them.
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ }
+
// Note that we could technically not return an error yet here and just hope
// that the connection is reestablished or monitor updated by the time we get
// around to doing the actual forward, but better to fail early if we can and
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
let cur_height = self.best_block.read().unwrap().height() + 1;
- // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
- // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+ // Theoretically, channel counterparty shouldn't send us an HTLC expiring now,
+ // but we want to be robust wrt counterparty packet sanitization (see
+ // HTLC_FAIL_BACK_BUFFER rationale).
if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
break Some(("CLTV expiry is too far in the future", 21, None));
}
- // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
- // But, to be safe against policy reception, we use a longer delay.
- if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
+ // If the HTLC expires ~now, don't bother trying to forward it to our
+ // counterparty. They should fail it anyway, but we don't want to bother with
+ // the round-trips or risk them deciding they definitely want the HTLC and
+ // force-closing to ensure they get it if we're offline.
+ // We previously had a much more aggressive check here which tried to ensure
+ // our counterparty receives an HTLC which has *our* risk threshold met on it,
+ // but there is no need to do that, and since we're a bit conservative with our
+ // risk threshold it just results in failing to forward payments.
+ if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
}
// Only public for testing; this should otherwise never be called directly
- pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
+ pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payee: &Option<Payee>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.keys_manager.get_secure_random_bytes();
let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?;
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
if onion_utils::route_size_insane(&onion_payloads) {
return Err(APIError::RouteError{err: "Route size too large considering onion data"});
}
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes));
let err: Result<(), _> = loop {
let mut channel_lock = self.channel_state.lock().unwrap();
+
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ let payment_entry = pending_outbounds.entry(payment_id);
+ if let hash_map::Entry::Occupied(payment) = &payment_entry {
+ if !payment.get().is_retryable() {
+ return Err(APIError::RouteError {
+ err: "Payment already completed"
+ });
+ }
+ }
+
let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
Some(id) => id.clone(),
};
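+ // Insertion into `pending_outbound_payments` is wrapped in a macro so the call sites below can
+ // defer it until after a permanent monitor-update failure would already have broken out.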
+ macro_rules! insert_outbound_payment {
+ () => {
+ let payment = payment_entry.or_insert_with(|| PendingOutboundPayment::Retryable {
+ session_privs: HashSet::new(),
+ pending_amt_msat: 0,
+ pending_fee_msat: Some(0),
+ payment_hash: *payment_hash,
+ payment_secret: *payment_secret,
+ starting_block_height: self.best_block.read().unwrap().height(),
+ total_msat: total_value,
+ });
+ assert!(payment.insert(session_priv_bytes, path));
+ }
+ }
+
let channel_state = &mut *channel_lock;
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
match {
if !chan.get().is_live() {
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
}
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- path: path.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- }, onion_packet, &self.logger), channel_state, chan)
+ break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+ htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ payment_id,
+ payment_secret: payment_secret.clone(),
+ payee: payee.clone(),
+ }, onion_packet, &self.logger),
+ channel_state, chan)
} {
Some((update_add, commitment_signed, monitor_update)) => {
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
// is restored. Therefore, we must return an error indicating that
// it is unsafe to retry the payment wholesale, which we do in the
// send_payment check for MonitorUpdateFailed, below.
+ insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
return Err(APIError::MonitorUpdateFailed);
}
+ insert_outbound_payment!();
log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
},
});
},
- None => {},
+ None => { insert_outbound_payment!(); },
}
} else { unreachable!(); }
return Ok(());
/// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
/// bit set (either as required or as available). If multiple paths are present in the Route,
/// we assume the invoice had the basic_mpp feature set.
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
+ pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
+ self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
+ }
+
+ fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
if route.paths.len() < 1 {
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
}
// for now more than 10 paths likely carries too much one-path failure.
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
}
+ if payment_secret.is_none() && route.paths.len() > 1 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
+ }
let mut total_value = 0;
let our_node_id = self.get_our_node_id();
let mut path_errs = Vec::with_capacity(route.paths.len());
+ let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
'path_check: for path in route.paths.iter() {
if path.len() < 1 || path.len() > 20 {
path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
if path_errs.iter().any(|e| e.is_err()) {
return Err(PaymentSendFailure::PathParameterError(path_errs));
}
+ if let Some(amt_msat) = recv_value_msat {
+ debug_assert!(amt_msat >= total_value);
+ total_value = amt_msat;
+ }
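+ // When retrying (`recv_value_msat` is set), the recorded total above overrides the sum of
+ // this route's paths so the recipient can still correlate the retried part with the rest
+ // of the multi-path payment.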
let cur_height = self.best_block.read().unwrap().height() + 1;
let mut results = Vec::new();
for path in route.paths.iter() {
- results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
+ results.push(self.send_payment_along_path(&path, &route.payee, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
}
let mut has_ok = false;
let mut has_err = false;
- for res in results.iter() {
+ let mut pending_amt_unsent = 0;
+ let mut max_unsent_cltv_delta = 0;
+ for (res, path) in results.iter().zip(route.paths.iter()) {
if res.is_ok() { has_ok = true; }
if res.is_err() { has_err = true; }
if let &Err(APIError::MonitorUpdateFailed) = res {
// PartialFailure.
has_err = true;
has_ok = true;
- break;
+ } else if res.is_err() {
+ pending_amt_unsent += path.last().unwrap().fee_msat;
+ max_unsent_cltv_delta = cmp::max(max_unsent_cltv_delta, path.last().unwrap().cltv_expiry_delta);
}
}
if has_err && has_ok {
- Err(PaymentSendFailure::PartialFailure(results))
+ Err(PaymentSendFailure::PartialFailure {
+ results,
+ payment_id,
+ failed_paths_retry: if pending_amt_unsent != 0 {
+ if let Some(payee) = &route.payee {
+ Some(RouteParameters {
+ payee: payee.clone(),
+ final_value_msat: pending_amt_unsent,
+ final_cltv_expiry_delta: max_unsent_cltv_delta,
+ })
+ } else { None }
+ } else { None },
+ })
} else if has_err {
+ // If we failed to send any paths, we shouldn't have inserted the new PaymentId into
+ // our `pending_outbound_payments` map at all.
+ debug_assert!(self.pending_outbound_payments.lock().unwrap().get(&payment_id).is_none());
Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
} else {
- Ok(())
+ Ok(payment_id)
+ }
+ }
+
+ /// Retries a payment along the given [`Route`].
+ ///
+ /// Errors returned are a superset of those returned from [`send_payment`], so see
+ /// [`send_payment`] documentation for more details on errors. This method will also error if the
+ /// retry amount puts the payment more than 10% over the payment's total amount, or if the payment
+ /// for the given `payment_id` cannot be found (likely due to timeout or success).
+ ///
+ /// [`send_payment`]: Self::send_payment
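+ ///
+ /// A rough usage sketch (not part of this patch): `channel_manager`, a freshly computed
+ /// `route` for the failed amount, and the `payment_id` returned by [`send_payment`] are
+ /// assumed to exist in the caller's scope.
+ ///
+ /// ```ignore
+ /// // Hand the new route back together with the original payment's id.
+ /// channel_manager.retry_payment(&route, payment_id)?;
+ /// ```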
+ pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+ const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
+ for path in route.paths.iter() {
+ if path.len() == 0 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "length-0 path in route".to_string()
+ }))
+ }
+ }
+
+ let (total_msat, payment_hash, payment_secret) = {
+ let outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let Some(payment) = outbounds.get(&payment_id) {
+ match payment {
+ PendingOutboundPayment::Retryable {
+ total_msat, payment_hash, payment_secret, pending_amt_msat, ..
+ } => {
+ let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
+ if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
+ }))
+ }
+ (*total_msat, *payment_hash, *payment_secret)
+ },
+ PendingOutboundPayment::Legacy { .. } => {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
+ }))
+ },
+ PendingOutboundPayment::Fulfilled { .. } => {
+ return Err(PaymentSendFailure::ParameterError(APIError::RouteError {
+ err: "Payment already completed"
+ }));
+ },
+ }
+ } else {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
+ }))
+ }
+ };
+ return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
+ }
+
+ /// Send a spontaneous payment, which is a payment that does not require the recipient to have
+ /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
+ /// the preimage, it must be a cryptographically secure random value that no intermediate node
+ /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
+ /// never reach the recipient.
+ ///
+ /// See [`send_payment`] documentation for more details on the return value of this function.
+ ///
+ /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
+ /// [`send_payment`] for more information about the risks of duplicate preimage usage.
+ ///
+ /// Note that `route` must have exactly one path.
+ ///
+ /// [`send_payment`]: Self::send_payment
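+ ///
+ /// A rough usage sketch (names assumed, not part of this patch), letting LDK draw a fresh
+ /// random preimage by passing `None`:
+ ///
+ /// ```ignore
+ /// let (payment_hash, payment_id) = channel_manager.send_spontaneous_payment(&route, None)?;
+ /// ```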
+ pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+ let preimage = match payment_preimage {
+ Some(p) => p,
+ None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
+ };
+ let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
+ match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
+ Ok(payment_id) => Ok((payment_hash, payment_id)),
+ Err(e) => Err(e)
}
}
(chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
.map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
} else { unreachable!(); })
, chan)
},
/// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs
/// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
///
- /// Panics if a funding transaction has already been provided for this channel.
+ /// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
+ /// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
///
/// May panic if the output found in the funding transaction is duplicative with some other
/// channel (note that this should be trivially prevented by using unique funding transaction
/// create a new channel with a conflicting funding transaction.
///
/// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
+ /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
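+ ///
+ /// A rough usage sketch (not part of this patch): `channel_manager` and a wallet-built
+ /// `funding_tx` paying the script from [`Event::FundingGenerationReady`] are assumed.
+ ///
+ /// ```ignore
+ /// channel_manager.funding_transaction_generated(&temporary_channel_id, funding_tx)?;
+ /// ```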
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
});
- match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) {
+ match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
// close channel and then send error message to peer.
let counterparty_node_id = chan.get().get_counterparty_node_id();
let err: Result<(), _> = match e {
- ChannelError::Ignore(_) => {
+ ChannelError::Ignore(_) | ChannelError::Warn(_) => {
panic!("Stated return value requirements in send_commitment() were not met");
- },
+ }
ChannelError::Close(msg) => {
log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
let (channel_id, mut channel) = chan.remove_entry();
if let Some(short_id) = channel.get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
+ // ChannelClosed event is generated by handle_error for us.
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
},
ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
};
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
- incoming_shared_secret, payment_hash, amt_to_forward, .. },
+ routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
prev_funding_outpoint } => {
+ let (cltv_expiry, onion_payload) = match routing {
+ PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } =>
+ (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)),
+ PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
+ (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)),
+ _ => {
+ panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
+ }
+ };
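+ // From here on the HTLC is handled uniformly; `onion_payload` is what later distinguishes
+ // an invoice-based payment from a spontaneous (keysend) one.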
let claimable_htlc = ClaimableHTLC {
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
incoming_packet_shared_secret: incoming_shared_secret,
},
value: amt_to_forward,
- payment_data: payment_data.clone(),
- cltv_expiry: incoming_cltv_expiry,
+ cltv_expiry,
+ onion_payload,
};
macro_rules! fail_htlc {
let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
match payment_secrets.entry(payment_hash) {
hash_map::Entry::Vacant(_) => {
- log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ match claimable_htlc.onion_payload {
+ OnionPayload::Invoice(_) => {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ },
+ OnionPayload::Spontaneous(preimage) => {
+ match channel_state.claimable_htlcs.entry(payment_hash) {
+ hash_map::Entry::Vacant(e) => {
+ e.insert(vec![claimable_htlc]);
+ new_events.push(events::Event::PaymentReceived {
+ payment_hash,
+ amt: amt_to_forward,
+ purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+ });
+ },
+ hash_map::Entry::Occupied(_) => {
+ log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ }
+ }
+ }
+ }
},
hash_map::Entry::Occupied(inbound_payment) => {
+ let payment_data =
+ if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
+ data.clone()
+ } else {
+ log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ continue
+ };
if inbound_payment.get().payment_secret != payment_data.payment_secret {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc);
let mut total_value = 0;
let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
.or_insert(Vec::new());
+ if htlcs.len() == 1 {
+ if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ continue
+ }
+ }
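+ // The check above ensures keysend and invoice HTLCs are never mixed under one payment
+ // hash; only invoice HTLCs are allowed to accumulate here.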
htlcs.push(claimable_htlc);
for htlc in htlcs.iter() {
total_value += htlc.value;
- if htlc.payment_data.total_msat != payment_data.total_msat {
- log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
- log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat);
- total_value = msgs::MAX_VALUE_MSAT;
+ match &htlc.onion_payload {
+ OnionPayload::Invoice(htlc_payment_data) => {
+ if htlc_payment_data.total_msat != payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+ log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat);
+ total_value = msgs::MAX_VALUE_MSAT;
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT { break; }
+ },
+ _ => unreachable!(),
}
- if total_value >= msgs::MAX_VALUE_MSAT { break; }
}
if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat {
log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
} else if total_value == payment_data.total_msat {
new_events.push(events::Event::PaymentReceived {
payment_hash,
- payment_preimage: inbound_payment.get().payment_preimage,
- payment_secret: payment_data.payment_secret,
+ purpose: events::PaymentPurpose::InvoicePayment {
+ payment_preimage: inbound_payment.get().payment_preimage,
+ payment_secret: payment_data.payment_secret,
+ },
amt: total_value,
- user_payment_id: inbound_payment.get().user_payment_id,
});
// Only ever generate at most one PaymentReceived
// per registered payment_hash, even if it isn't
},
};
},
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
- },
HTLCForwardInfo::FailHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
}
self.process_background_events();
}
- /// If a peer is disconnected we mark any channels with that peer as 'disabled'.
- /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
- /// to inform the network about the uselessness of these channels.
- ///
- /// This method handles all the details, and must be called roughly once per minute.
- ///
- /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
- pub fn timer_tick_occurred(&self) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = NotifyOption::SkipPersist;
- if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
+ fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+ if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
+ // If the feerate has decreased by less than half, don't bother
+ if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ return (true, NotifyOption::SkipPersist, Ok(()));
+ }
+ if !chan.is_live() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ return (true, NotifyOption::SkipPersist, Ok(()));
+ }
+ log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- for (_, chan) in channel_state.by_id.iter_mut() {
- match chan.channel_update_status() {
- ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
- ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
- ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
- ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
- ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
- },
- ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
- },
- _ => {},
+ let mut retain_channel = true;
+ let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
+ Ok(res) => Ok(res),
+ Err(e) => {
+ let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ if drop { retain_channel = false; }
+ Err(res)
+ }
+ };
+ let ret_err = match res {
+ Ok(Some((update_fee, commitment_signed, monitor_update))) => {
+ if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), chan_id);
+ if drop { retain_channel = false; }
+ res
+ } else {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: Some(update_fee),
+ commitment_signed,
+ },
+ });
+ Ok(())
}
+ },
+ Ok(None) => Ok(()),
+ Err(e) => Err(e),
+ };
+ (retain_channel, NotifyOption::DoPersist, ret_err)
+ }
+
+ #[cfg(fuzzing)]
+ /// In chanmon_consistency we want to sometimes do the channel fee updates done in
+ /// timer_tick_occurred, but we can't generate the disabled-channel updates there, as the fuzz
+ /// target treats those as a failure (they usually indicate a channel force-close, which is
+ /// exactly what it wants to detect). Thus, we expose this variant here for its benefit.
+ pub fn maybe_update_chan_fees(&self) {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut should_persist = NotifyOption::SkipPersist;
+
+ let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+ let mut handle_errors = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ let short_to_id = &mut channel_state.short_to_id;
+ channel_state.by_id.retain(|chan_id, chan| {
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+ if err.is_err() {
+ handle_errors.push(err);
+ }
+ retain_channel
+ });
+ }
+
+ should_persist
+ });
+ }
+
+ /// Performs actions which should happen on startup and roughly once per minute thereafter.
+ ///
+ /// This currently includes:
+ /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
+ /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+ /// than a minute, informing the network that they should no longer attempt to route over
+ /// the channel.
+ ///
+ /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+ /// estimate fetches.
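+ ///
+ /// A rough scheduling sketch (assumes a `channel_manager` behind an `Arc`; many users let a
+ /// background processor drive this instead of spawning their own thread):
+ ///
+ /// ```ignore
+ /// std::thread::spawn(move || loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(60));
+ ///     channel_manager.timer_tick_occurred();
+ /// });
+ /// ```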
+ pub fn timer_tick_occurred(&self) {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut should_persist = NotifyOption::SkipPersist;
+ if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
+
+ let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+ let mut handle_errors = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ let short_to_id = &mut channel_state.short_to_id;
+ channel_state.by_id.retain(|chan_id, chan| {
+ let counterparty_node_id = chan.get_counterparty_node_id();
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+ if err.is_err() {
+ handle_errors.push((err, counterparty_node_id));
+ }
+ if !retain_channel { return false; }
+
+ if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+ let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ handle_errors.push((Err(err), chan.get_counterparty_node_id()));
+ if needs_close { return false; }
+ }
+
+ match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+ ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+ ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ },
+ ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ },
+ _ => {},
+ }
+
+ true
+ });
}
+ for (err, counterparty_node_id) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
+ }
should_persist
});
}
self.fail_htlc_backwards_internal(channel_state,
htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
},
- HTLCSource::OutboundRoute { session_priv, .. } => {
- if {
- let mut session_priv_bytes = [0; 32];
- session_priv_bytes.copy_from_slice(&session_priv[..]);
- self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
- } {
- self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
- payment_hash,
- rejected_by_dest: false,
-#[cfg(test)]
- error_code: None,
-#[cfg(test)]
- error_data: None,
- }
- )
+ HTLCSource::OutboundRoute { session_priv, payment_id, path, payee, .. } => {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+ if payment.get_mut().remove(&session_priv_bytes, Some(&path)) && !payment.get().is_fulfilled() {
+ let retry = if let Some(payee_data) = payee {
+ let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+ Some(RouteParameters {
+ payee: payee_data,
+ final_value_msat: path_last_hop.fee_msat,
+ final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
+ })
+ } else { None };
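+ // `retry` hands the event consumer the parameters (the amount and CLTV delta of just this
+ // failed path) needed to find a replacement route and call retry_payment.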
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentPathFailed {
+ payment_id: Some(payment_id),
+ payment_hash,
+ rejected_by_dest: false,
+ network_update: None,
+ all_paths_failed: payment.get().remaining_parts() == 0,
+ path: path.clone(),
+ short_channel_id: None,
+ retry,
+ #[cfg(test)]
+ error_code: None,
+ #[cfg(test)]
+ error_data: None,
+ }
+ );
+ }
} else {
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
}
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, session_priv, .. } => {
- if {
- let mut session_priv_bytes = [0; 32];
- session_priv_bytes.copy_from_slice(&session_priv[..]);
- !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
- } {
+ HTLCSource::OutboundRoute { ref path, session_priv, payment_id, ref payee, .. } => {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ let mut all_paths_failed = false;
+ if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+ if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
+ log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+ return;
+ }
+ if payment.get().is_fulfilled() {
+ log_trace!(self.logger, "Received failure of HTLC with payment_hash {} after payment completion", log_bytes!(payment_hash.0));
+ return;
+ }
+ if payment.get().remaining_parts() == 0 {
+ all_paths_failed = true;
+ }
+ } else {
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
return;
}
- log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
mem::drop(channel_state_lock);
+ let retry = if let Some(payee_data) = payee {
+ let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
+ Some(RouteParameters {
+ payee: payee_data.clone(),
+ final_value_msat: path_last_hop.fee_msat,
+ final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
+ })
+ } else { None };
+ log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
match &onion_error {
&HTLCFailReason::LightningError { ref err } => {
#[cfg(test)]
- let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
#[cfg(not(test))]
- let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
// TODO: If we decided to blame ourselves (or one of our channels) in
// process_onion_failure we should close that channel as it implies our
// next-hop is needlessly blaming us!
- if let Some(update) = channel_update {
- self.channel_state.lock().unwrap().pending_msg_events.push(
- events::MessageSendEvent::PaymentFailureNetworkUpdate {
- update,
- }
- );
- }
self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
+ events::Event::PaymentPathFailed {
+ payment_id: Some(payment_id),
payment_hash: payment_hash.clone(),
rejected_by_dest: !payment_retryable,
+ network_update,
+ all_paths_failed,
+ path: path.clone(),
+ short_channel_id,
+ retry,
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
ref data,
.. } => {
// we get a fail_malformed_htlc from the first hop
- // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
+ // TODO: We'd like to generate a NetworkUpdate for temporary
// failures here, but that would be insufficient as get_route
// generally ignores its view of our own channels as we provide them via
// ChannelDetails.
// TODO: For non-temporary failures, we really should be closing the
// channel here as we apparently can't relay through them anyway.
self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
+ events::Event::PaymentPathFailed {
+ payment_id: Some(payment_id),
payment_hash: payment_hash.clone(),
rejected_by_dest: path.len() == 1,
+ network_update: None,
+ all_paths_failed,
+ path: path.clone(),
+ short_channel_id: Some(path.first().unwrap().short_channel_id),
+ retry,
#[cfg(test)]
error_code: Some(*failure_code),
#[cfg(test)]
}
}
- /// Provides a payment preimage in response to a PaymentReceived event, returning true and
- /// generating message events for the net layer to claim the payment, if possible. Thus, you
- /// should probably kick the net layer to go send messages if this returns true!
+ /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
+ /// [`MessageSendEvent`]s needed to claim the payment.
///
/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
/// event matches your expectation. If you fail to do so and call this method, you may provide
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
- /// May panic if called except in response to a PaymentReceived event.
+ /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now
+ /// pending for processing via [`get_and_clear_pending_msg_events`].
///
+ /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
+ /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
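+ ///
+ /// A rough usage sketch (not part of this patch; `channel_manager` and `payment_preimage`
+ /// are assumed to come from the surrounding event loop):
+ ///
+ /// ```ignore
+ /// if channel_manager.claim_funds(payment_preimage) {
+ ///     // New MessageSendEvents are pending; wake the peer handler so they are sent.
+ /// }
+ /// ```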
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
} else {
match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
- Err(Some(e)) => {
- if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+ ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+ if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+ log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
claimed_any_htlcs = true;
- } else { errs.push(e); }
+ } else { errs.push((pk, err)); }
+ },
+ ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+ ClaimFundsFromHop::DuplicateClaim => {
+ // We should rarely, if ever, get here. When we do, it likely indicates
+ // that the HTLC timed out some time ago and is no longer available to
+ // be claimed, so it does not make sense to set `claimed_any_htlcs`.
},
- Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
- Ok(()) => claimed_any_htlcs = true,
+ ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
}
}
}
} else { false }
}
- fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+ fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
let channel_state = &mut **channel_state_lock;
let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
Some(chan_id) => chan_id.clone(),
None => {
- return Err(None)
+ return ClaimFundsFromHop::PrevHopForceClosed
}
};
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
- let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
- Ok((msgs, monitor_option)) => {
- if let Some(monitor_update) = monitor_option {
+ Ok(msgs_monitor_option) => {
+ if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- if was_frozen_for_monitor {
- assert!(msgs.is_none());
- } else {
- return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
- }
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+ "Failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ return ClaimFundsFromHop::MonitorUpdateFail(
+ chan.get().get_counterparty_node_id(),
+ handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+ Some(htlc_value_msat)
+ );
}
+ if let Some((msg, commitment_signed)) = msgs {
+ log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+ log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: vec![msg],
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ }
+ });
+ }
+ return ClaimFundsFromHop::Success(htlc_value_msat);
+ } else {
+ return ClaimFundsFromHop::DuplicateClaim;
}
- if let Some((msg, commitment_signed)) = msgs {
- log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
- log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: vec![msg],
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- }
- });
- }
- return Ok(())
},
- Err(e) => {
- // TODO: Do something with e?
- // This should only occur if we are claiming an HTLC at the same time as the
- // HTLC is being failed (eg because a block is being connected and this caused
- // an HTLC to time out). This should, of course, only occur if the user is the
- // one doing the claiming (as it being a part of a peer claim would imply we're
- // about to lose funds) and only if the lock in claim_funds was dropped as a
- // previous HTLC was failed (thus not for an MPP payment).
- debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
- return Err(None)
+ Err((e, monitor_update)) => {
+ if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+ "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+ payment_preimage, e);
+ }
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+ if drop {
+ chan.remove_entry();
+ }
+ return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
},
}
} else { unreachable!(); }
}
- fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+ fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ for source in sources.drain(..) {
+ if let HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } = source {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+ assert!(payment.get().is_fulfilled());
+ if payment.get_mut().remove(&session_priv_bytes, None) {
+ pending_events.push(
+ events::Event::PaymentPathSuccessful {
+ payment_id,
+ payment_hash: payment.get().payment_hash(),
+ path,
+ }
+ );
+ }
+ if payment.get().remaining_parts() == 0 {
+ payment.remove();
+ }
+ }
+ }
+ }
+ }
+
+ fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
match source {
- HTLCSource::OutboundRoute { session_priv, .. } => {
+ HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
mem::drop(channel_state_lock);
- if {
- let mut session_priv_bytes = [0; 32];
- session_priv_bytes.copy_from_slice(&session_priv[..]);
- self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
- } {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentSent {
- payment_preimage
- });
+ if !payment.get().is_fulfilled() {
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+ let fee_paid_msat = payment.get().get_pending_fee_msat();
+ pending_events.push(
+ events::Event::PaymentSent {
+ payment_id: Some(payment_id),
+ payment_preimage,
+ payment_hash,
+ fee_paid_msat,
+ }
+ );
+ payment.get_mut().mark_fulfilled();
+ }
+
+ if from_onchain {
+ // We currently immediately remove HTLCs which were fulfilled on-chain.
+ // This could potentially lead to removing a pending payment too early,
+ // with a reorg of one block causing us to re-add the fulfilled payment on
+ // restart.
+ // TODO: We should have a second monitor event that informs us of payments
+ // irrevocably fulfilled.
+ if payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
+ let payment_hash = Some(PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()));
+ pending_events.push(
+ events::Event::PaymentPathSuccessful {
+ payment_id,
+ payment_hash,
+ path,
+ }
+ );
+ }
+
+ if payment.get().remaining_parts() == 0 {
+ payment.remove();
+ }
+ }
} else {
log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
}
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
- if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
- Ok(()) => Ok(()),
- Err(None) => {
- let preimage_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage: payment_preimage.clone(),
- }],
- };
- // We update the ChannelMonitor on the backward link, after
- // receiving an offchain preimage event from the forward link (the
- // event being update_fulfill_htlc).
- if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- }
- Ok(())
- },
- Err(Some(res)) => Err(res),
- } {
- mem::drop(channel_state_lock);
- let res: Result<(), _> = Err(err);
- let _ = handle_error!(self, res, counterparty_node_id);
+ let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+ let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+ let htlc_claim_value_msat = match res {
+ ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+ ClaimFundsFromHop::Success(amt) => Some(amt),
+ _ => None,
+ };
+ if let ClaimFundsFromHop::PrevHopForceClosed = res {
+ let preimage_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage.clone(),
+ }],
+ };
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an offchain preimage event from the forward link (the
+ // event being update_fulfill_htlc).
+ if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ }
+ // Note that we do *not* set `claimed_htlc` to false here. In fact, this
+ // totally could be a duplicate claim, but we have no way of knowing
+ // without interrogating the `ChannelMonitor` we've provided the above
+ // update to. Instead, we simply document in `PaymentForwarded` that this
+ // can happen.
+ }
+ mem::drop(channel_state_lock);
+ if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+ let result: Result<(), _> = Err(err);
+ let _ = handle_error!(self, result, pk);
+ }
+
+ if claimed_htlc {
+ if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+ Some(claimed_htlc_value - forwarded_htlc_value)
+ } else { None };
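+ // The fee we earned forwarding is the difference between what we claimed on the
+ // inbound edge and what we forwarded out; None if the claimed amount is unknown.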
+
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentForwarded {
+ fee_earned_msat,
+ claim_from_onchain_tx: from_onchain,
+ });
+ }
}
},
}
self.our_network_pubkey.clone()
}
- /// Restores a single, given channel to normal operation after a
- /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
- /// operation.
- ///
- /// All ChannelMonitor updates up to and including highest_applied_update_id must have been
- /// fully committed in every copy of the given channels' ChannelMonitors.
- ///
- /// Note that there is no effect to calling with a highest_applied_update_id other than the
- /// current latest ChannelMonitorUpdate and one call to this function after multiple
- /// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field
- /// exists largely only to prevent races between this and concurrent update_monitor calls.
- ///
- /// Thus, the anticipated use is, at a high level:
- /// 1) You register a chain::Watch with this ChannelManager,
- /// 2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
- /// said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures
- /// any time it cannot do so instantly,
- /// 3) update(s) are applied to each remote copy of a ChannelMonitor,
- /// 4) once all remote copies are updated, you call this function with the update_id that
- /// completed, and once it is the latest the Channel will be re-enabled.
- pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let chan_restoration_res;
- let mut pending_failures = {
+ let (mut pending_failures, finalized_claims) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
return;
}
- let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
- let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+ let updates = channel.get_mut().monitor_updating_restored(&self.logger);
+ let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
// We only send a channel_update in the case where we are just now sending a
// funding_locked and the channel is in a usable state. Further, we rely on the
// normal announcement_signatures process to send a channel_update for public
msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
})
} else { None };
- chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+ chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- pending_failures
+ (updates.failed_htlcs, updates.finalized_claimed_htlcs)
};
post_handle_chan_restoration!(self, chan_restoration_res);
+ self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
- let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
+ if !self.default_configuration.accept_inbound_channels {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ }
+
+ let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(),
+ &their_features, msg, 0, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger)
.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan);
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
// accepted payment from yet. We do, however, need to wait to send our funding_locked
// until we have persisted our monitor.
- chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+ chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new());
},
}
}
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+ let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
+ }
+ }
+ return res
}
funding_tx
},
}
fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
- let (mut dropped_htlcs, chan_option) = {
+ let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
+
+ if !chan_entry.get().received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
+
+ let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+ dropped_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ let (result, is_permanent) =
+ handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key());
+ if is_permanent {
+ remove_channel!(channel_state, chan_entry);
+ break result;
+ }
+ }
+ }
+
if let Some(msg) = shutdown {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
+ node_id: *counterparty_node_id,
msg,
});
}
- if chan_entry.get().is_shutdown() {
- if let Some(short_id) = chan_entry.get().get_short_channel_id() {
- channel_state.short_to_id.remove(&short_id);
- }
- (dropped_htlcs, Some(chan_entry.remove_entry().1))
- } else { (dropped_htlcs, None) }
+
+ break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
for htlc_source in dropped_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- if let Some(chan) = chan_option {
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- }
+
+ let _ = handle_error!(self, result, *counterparty_node_id);
Ok(())
}
msg: update
});
}
+ self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
}
Ok(())
}
}
let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
- // Ensure error_code has the UPDATE flag set, since by default we send a
- // channel update along as part of failing the HTLC.
- assert!((error_code & 0x1000) != 0);
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
match pending_forward_info {
PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
- let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
- onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
- let mut res = Vec::with_capacity(8 + 128);
- // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
- res.extend_from_slice(&byte_utils::be16_to_array(0));
- res.extend_from_slice(&upd.encode_with_len()[..]);
- res
- }[..])
+ let reason = if (error_code & 0x1000) != 0 {
+ if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
+ let mut res = Vec::with_capacity(8 + 128);
+ // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+ res.extend_from_slice(&byte_utils::be16_to_array(0));
+ res.extend_from_slice(&upd.encode_with_len()[..]);
+ res
+ }[..])
+ } else {
+ // The only case where we'd be unable to
+ // successfully get a channel update is if the
+ // channel isn't in the fully-funded state yet,
+ // implying our counterparty is trying to route
+ // payments over the channel back to themselves
+ // (because no one else should know the short_id
+ // is a lightning channel yet). We should have
+ // no problem just calling this
+ // unknown_next_peer (0x4000|10).
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+ }
} else {
- // The only case where we'd be unable to
- // successfully get a channel update is if the
- // channel isn't in the fully-funded state yet,
- // implying our counterparty is trying to route
- // payments over the channel back to themselves
- // (cause no one else should know the short_id
- // is a lightning channel yet). We should have
- // no problem just calling this
- // unknown_next_peer (0x4000|10).
- onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
};
let msg = msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let mut channel_lock = self.channel_state.lock().unwrap();
- let htlc_source = {
+ let (htlc_source, forwarded_htlc_value) = {
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+ self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
Ok(())
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
- match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
+ let (revoke_and_ack, commitment_signed, monitor_update) =
+ match chan.get_mut().commitment_signed(&msg, &self.logger) {
Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
Err((Some(update), e)) => {
assert!(chan.get().is_awaiting_monitor_update());
};
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
- //TODO: Rebroadcast closing_signed if present on monitor update restoration
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
node_id: counterparty_node_id.clone(),
},
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
match channel_state.forward_htlcs.entry(match forward_info.routing {
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
PendingHTLCRouting::Receive { .. } => 0,
+ PendingHTLCRouting::ReceiveKeysend { .. } => 0,
}) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
- let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
- break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
- htlcs_to_fail = htlcs_to_fail_in;
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ let raa_updates = break_chan_entry!(self,
+ chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
+ htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
+ if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
if was_frozen_for_monitor {
- assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+ assert!(raa_updates.commitment_update.is_none());
+ assert!(raa_updates.accepted_htlcs.is_empty());
+ assert!(raa_updates.failed_htlcs.is_empty());
+ assert!(raa_updates.finalized_claimed_htlcs.is_empty());
break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
} else {
- if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
+ if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
+ RAACommitmentOrder::CommitmentFirst, false,
+ raa_updates.commitment_update.is_some(),
+ raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+ raa_updates.finalized_claimed_htlcs) {
break Err(e);
} else { unreachable!(); }
}
}
- if let Some(updates) = commitment_update {
+ if let Some(updates) = raa_updates.commitment_update {
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id.clone(),
updates,
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
+ break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+ raa_updates.finalized_claimed_htlcs,
+ chan.get().get_short_channel_id()
+ .expect("RAA should only work on a short-id-available channel"),
+ chan.get().get_funding_txo().unwrap()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
match res {
- Ok((pending_forwards, mut pending_failures, short_channel_id, channel_outpoint)) => {
+ Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
+ short_channel_id, channel_outpoint)) =>
+ {
for failure in pending_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
+ self.finalize_claims(finalized_claim_htlcs);
Ok(())
},
Err(e) => Err(e)
Ok(())
}
- /// Begin Update fee process. Allowed only on an outbound channel.
- /// If successful, will generate a UpdateHTLCs event, so you should probably poll
- /// PeerManager::process_events afterwards.
- /// Note: This API is likely to change!
- /// (C-not exported) Cause its doc(hidden) anyway
- #[doc(hidden)]
- pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let counterparty_node_id;
- let err: Result<(), _> = loop {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
-
- match channel_state.by_id.entry(channel_id) {
- hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}),
- hash_map::Entry::Occupied(mut chan) => {
- if !chan.get().is_outbound() {
- return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()});
- }
- if chan.get().is_awaiting_monitor_update() {
- return Err(APIError::MonitorUpdateFailed);
- }
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()});
- }
- counterparty_node_id = chan.get().get_counterparty_node_id();
- if let Some((update_fee, commitment_signed, monitor_update)) =
- break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan)
- {
- if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- unimplemented!();
- }
- log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: Some(update_fee),
- commitment_signed,
- },
- });
- }
- },
- }
- return Ok(())
- };
-
- match handle_error!(self, err, counterparty_node_id) {
- Ok(_) => unreachable!(),
- Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
- }
- }
-
/// Process pending events from the `chain::Watch`, returning whether any events were processed.
fn process_pending_monitor_events(&self) -> bool {
let mut failed_channels = Vec::new();
- let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+ let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for monitor_event in pending_monitor_events {
+ for monitor_event in pending_monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
},
- MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+ MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+ MonitorEvent::UpdateFailed(funding_outpoint) => {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let by_id = &mut channel_state.by_id;
msg: update
});
}
+ let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+ ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+ } else {
+ ClosureReason::CommitmentTxConfirmed
+ };
+ self.issue_channel_close_events(&chan, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
});
}
},
+ MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+ self.channel_monitor_updated(&funding_txo, monitor_update_id);
+ },
}
}
has_pending_monitor_events
}
+ /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
+ /// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
+ /// update events as a separate process method here.
+ #[cfg(feature = "fuzztarget")]
+ pub fn process_monitor_events(&self) {
+ self.process_pending_monitor_events();
+ }
+
/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
/// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
/// update was applied.
if let Some((commitment_update, monitor_update)) = commitment_opt {
if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
has_monitor_update = true;
- let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+ let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), channel_id);
handle_errors.push((chan.get_counterparty_node_id(), res));
if close_channel { return false; }
} else {
Err(e) => {
let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ // ChannelClosed event is generated by handle_error for us
!close_channel
}
}
});
}
- let has_update = has_monitor_update || !failed_htlcs.is_empty();
+ let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
for (failures, channel_id) in failed_htlcs.drain(..) {
self.fail_holding_cell_htlcs(failures, channel_id);
}
has_update
}
+ /// Check whether any channels have finished removing all pending updates after a shutdown
+ /// exchange and can now send a closing_signed.
+ /// Returns whether any closing_signed messages were generated.
+ fn maybe_generate_initial_closing_signed(&self) -> bool {
+ let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
+ let mut has_update = false;
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+
+ by_id.retain(|channel_id, chan| {
+ match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+ Ok((msg_opt, tx_opt)) => {
+ if let Some(msg) = msg_opt {
+ has_update = true;
+ pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: chan.get_counterparty_node_id(), msg,
+ });
+ }
+ if let Some(tx) = tx_opt {
+ // We're done with this channel. We got a closing_signed and sent back
+ // a closing_signed with a closing transaction to broadcast.
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+
+ self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
+
+ log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ false
+ } else { true }
+ },
+ Err(e) => {
+ has_update = true;
+ let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ !close_channel
+ }
+ }
+ });
+ }
+
+ for (counterparty_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
+ }
+
+ has_update
+ }
+
/// Handle a list of channel failures during a block_connected or block_disconnected call,
/// pushing the channel monitor update (if any) to the background events queue and removing the
/// Channel object.
}
}
- fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
+ fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106
let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
match payment_secrets.entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
e.insert(PendingInboundPayment {
- payment_secret, min_value_msat, user_payment_id, payment_preimage,
+ payment_secret, min_value_msat, payment_preimage,
+ user_payment_id: 0, // For compatibility with version 0.0.103 and earlier
// We assume that highest_seen_timestamp is pretty close to the current time -
		// it's updated when we receive a new block with the maximum time we've seen in
// a header. It should never be more than two hours in the future.
/// [`PaymentReceived`]: events::Event::PaymentReceived
/// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
- pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> (PaymentHash, PaymentSecret) {
+ pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> (PaymentHash, PaymentSecret) {
let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
(payment_hash,
- self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+ self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)
.expect("RNG Generated Duplicate PaymentHash"))
}
/// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
/// method may return an Err if another payment with the same payment_hash is still pending.
///
- /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
- /// allow tracking of which events correspond with which calls to this and
- /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
- /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
- /// with invoice metadata stored elsewhere.
- ///
/// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
/// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
/// before a [`PaymentReceived`] event will be generated, ensuring that we do not provide the
///
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`PaymentReceived`]: events::Event::PaymentReceived
- /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
- pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
- self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+ pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
+ self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs)
}
#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let events = core::cell::RefCell::new(Vec::new());
- let event_handler = |event| events.borrow_mut().push(event);
+ let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
self.process_pending_events(&event_handler);
events.into_inner()
}
+
+ #[cfg(test)]
+ pub fn has_pending_payments(&self) -> bool {
+ !self.pending_outbound_payments.lock().unwrap().is_empty()
+ }
+
+ #[cfg(test)]
+ pub fn clear_pending_payments(&self) {
+ self.pending_outbound_payments.lock().unwrap().clear()
+ }
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
if self.check_free_holding_cells() {
result = NotifyOption::DoPersist;
}
+ if self.maybe_generate_initial_closing_signed() {
+ result = NotifyOption::DoPersist;
+ }
let mut pending_events = Vec::new();
let mut channel_state = self.channel_state.lock().unwrap();
result = NotifyOption::DoPersist;
}
- let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
if !pending_events.is_empty() {
result = NotifyOption::DoPersist;
}
for event in pending_events.drain(..) {
- handler.handle_event(event);
+ handler.handle_event(&event);
}
result
payment_secrets.retain(|_, inbound_payment| {
inbound_payment.expiry_time > header.time as u64
});
+
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ outbounds.retain(|_, payment| {
+ const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
+ if payment.remaining_parts() != 0 { return true }
+ if let PendingOutboundPayment::Retryable { starting_block_height, .. } = payment {
+ return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height
+ }
+ true
+ });
}
fn get_relevant_txids(&self) -> Vec<Txid> {
/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
/// the function.
- fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
+ fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), ClosureReason>>
(&self, height_opt: Option<u32>, f: FN) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
}
short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
}
- } else if let Err(e) = res {
+ } else if let Err(reason) = res {
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
msg: update
});
}
+ let reason_message = format!("{}", reason);
+ self.issue_channel_close_events(channel, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage { msg: e },
+ action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+ channel_id: channel.channel_id(),
+ data: reason_message,
+ } },
});
return false;
}
msg: update
});
}
+ self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
false
} else {
true
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
+ self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
return false;
} else {
no_channels_remain = false;
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
for chan in self.list_channels() {
if chan.counterparty.node_id == *counterparty_node_id {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
+ let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
}
}
} else {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
+ let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
}
}
}
(1, Receive) => {
(0, payment_data, required),
(2, incoming_cltv_expiry, required),
- }
+ },
+ (2, ReceiveKeysend) => {
+ (0, payment_preimage, required),
+ (2, incoming_cltv_expiry, required),
+ },
;);
impl_writeable_tlv_based!(PendingHTLCInfo, {
(8, outgoing_cltv_value, required)
});
-impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
- (0, Relay),
- (1, Malformed),
-);
+
+impl Writeable for HTLCFailureMsg {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ match self {
+ HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
+ 0u8.write(writer)?;
+ channel_id.write(writer)?;
+ htlc_id.write(writer)?;
+ reason.write(writer)?;
+ },
+ HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id, htlc_id, sha256_of_onion, failure_code
+ }) => {
+ 1u8.write(writer)?;
+ channel_id.write(writer)?;
+ htlc_id.write(writer)?;
+ sha256_of_onion.write(writer)?;
+ failure_code.write(writer)?;
+ },
+ }
+ Ok(())
+ }
+}
+
+impl Readable for HTLCFailureMsg {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(reader)?;
+ match id {
+ 0 => {
+ Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: Readable::read(reader)?,
+ htlc_id: Readable::read(reader)?,
+ reason: Readable::read(reader)?,
+ }))
+ },
+ 1 => {
+ Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: Readable::read(reader)?,
+ htlc_id: Readable::read(reader)?,
+ sha256_of_onion: Readable::read(reader)?,
+ failure_code: Readable::read(reader)?,
+ }))
+ },
+ // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
+ // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
+ // messages contained in the variants.
+ // In version 0.0.101, support for reading the variants with these types was added, and
+ // we should migrate to writing these variants when UpdateFailHTLC or
+ // UpdateFailMalformedHTLC get TLV fields.
+ 2 => {
+ let length: BigSize = Readable::read(reader)?;
+ let mut s = FixedLengthReader::new(reader, length.0);
+ let res = Readable::read(&mut s)?;
+ s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
+ Ok(HTLCFailureMsg::Relay(res))
+ },
+ 3 => {
+ let length: BigSize = Readable::read(reader)?;
+ let mut s = FixedLengthReader::new(reader, length.0);
+ let res = Readable::read(&mut s)?;
+ s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
+ Ok(HTLCFailureMsg::Malformed(res))
+ },
+ _ => Err(DecodeError::UnknownRequiredFeature),
+ }
+ }
+}
+
impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
(0, Forward),
(1, Fail),
(6, incoming_packet_shared_secret, required)
});
-impl_writeable_tlv_based!(ClaimableHTLC, {
- (0, prev_hop, required),
- (2, value, required),
- (4, payment_data, required),
- (6, cltv_expiry, required),
-});
+impl Writeable for ClaimableHTLC {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
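+		// Serialize the two OnionPayload variants back into their optional TLV fields: invoice
+		// payments carry `payment_data`, spontaneous (keysend) payments carry the preimage.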
+ let payment_data = match &self.onion_payload {
+ OnionPayload::Invoice(data) => Some(data.clone()),
+ _ => None,
+ };
+ let keysend_preimage = match self.onion_payload {
+ OnionPayload::Invoice(_) => None,
+ OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+ };
+		write_tlv_fields!(writer, {
+			(0, self.prev_hop, required),
+			(2, self.value, required),
+			(4, payment_data, option),
+			(6, self.cltv_expiry, required),
+			(8, keysend_preimage, option),
+		});
+ Ok(())
+ }
+}
-impl_writeable_tlv_based_enum!(HTLCSource,
- (0, OutboundRoute) => {
- (0, session_priv, required),
- (2, first_hop_htlc_msat, required),
- (4, path, vec_type),
- }, ;
- (1, PreviousHopData)
-);
+impl Readable for ClaimableHTLC {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
+ let mut value = 0;
+ let mut payment_data: Option<msgs::FinalOnionHopData> = None;
+ let mut cltv_expiry = 0;
+ let mut keysend_preimage: Option<PaymentPreimage> = None;
+		read_tlv_fields!(reader, {
+			(0, prev_hop, required),
+			(2, value, required),
+			(4, payment_data, option),
+			(6, cltv_expiry, required),
+			(8, keysend_preimage, option),
+		});
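+		// Exactly one of `payment_data` (invoice payments) or `keysend_preimage` (spontaneous
+		// payments) must be present; anything else indicates inconsistent serialized data.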
+ let onion_payload = match keysend_preimage {
+ Some(p) => {
+ if payment_data.is_some() {
+ return Err(DecodeError::InvalidValue)
+ }
+ OnionPayload::Spontaneous(p)
+ },
+ None => {
+ if payment_data.is_none() {
+ return Err(DecodeError::InvalidValue)
+ }
+ OnionPayload::Invoice(payment_data.unwrap())
+ },
+ };
+ Ok(Self {
+ prev_hop: prev_hop.0.unwrap(),
+ value,
+ onion_payload,
+ cltv_expiry,
+ })
+ }
+}
+
+impl Readable for HTLCSource {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(reader)?;
+ match id {
+ 0 => {
+ let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
+ let mut first_hop_htlc_msat: u64 = 0;
+ let mut path = Some(Vec::new());
+ let mut payment_id = None;
+ let mut payment_secret = None;
+ let mut payee = None;
+ read_tlv_fields!(reader, {
+ (0, session_priv, required),
+ (1, payment_id, option),
+ (2, first_hop_htlc_msat, required),
+ (3, payment_secret, option),
+ (4, path, vec_type),
+ (5, payee, option),
+ });
+ if payment_id.is_none() {
+ // For backwards compat, if there was no payment_id written, use the session_priv bytes
+ // instead.
+ payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
+ }
+ Ok(HTLCSource::OutboundRoute {
+ session_priv: session_priv.0.unwrap(),
+ first_hop_htlc_msat: first_hop_htlc_msat,
+ path: path.unwrap(),
+ payment_id: payment_id.unwrap(),
+ payment_secret,
+ payee,
+ })
+ }
+ 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
+ _ => Err(DecodeError::UnknownRequiredFeature),
+ }
+ }
+}
+
+impl Writeable for HTLCSource {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::io::Error> {
+ match self {
+ HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payee } => {
+ 0u8.write(writer)?;
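+				// `payment_id` is always present on newly-written data, but is wrapped in an
+				// Option so it can be written as an odd TLV type, which readers from before the
+				// field existed simply skip.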
+ let payment_id_opt = Some(payment_id);
+ write_tlv_fields!(writer, {
+ (0, session_priv, required),
+ (1, payment_id_opt, option),
+ (2, first_hop_htlc_msat, required),
+ (3, payment_secret, option),
+ (4, path, vec_type),
+ (5, payee, option),
+ });
+ }
+ HTLCSource::PreviousHopData(ref field) => {
+ 1u8.write(writer)?;
+ field.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
impl_writeable_tlv_based_enum!(HTLCFailReason,
(0, LightningError) => {
(8, min_value_msat, required),
});
+impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment,
+ (0, Legacy) => {
+ (0, session_privs, required),
+ },
+ (1, Fulfilled) => {
+ (0, session_privs, required),
+ (1, payment_hash, option),
+ },
+ (2, Retryable) => {
+ (0, session_privs, required),
+ (1, pending_fee_msat, option),
+ (2, payment_hash, required),
+ (4, payment_secret, option),
+ (6, total_msat, required),
+ (8, pending_amt_msat, required),
+ (10, starting_block_height, required),
+ },
+);
+
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let _consistency_lock = self.total_consistency_lock.write().unwrap();
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
}
let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
- (pending_outbound_payments.len() as u64).write(writer)?;
- for session_priv in pending_outbound_payments.iter() {
- session_priv.write(writer)?;
+ // For backwards compat, write the session privs and their total length.
+ let mut num_pending_outbounds_compat: u64 = 0;
+ for (_, outbound) in pending_outbound_payments.iter() {
+ if !outbound.is_fulfilled() {
+ num_pending_outbounds_compat += outbound.remaining_parts() as u64;
+ }
+ }
+ num_pending_outbounds_compat.write(writer)?;
+ for (_, outbound) in pending_outbound_payments.iter() {
+ match outbound {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ for session_priv in session_privs.iter() {
+ session_priv.write(writer)?;
+ }
+ }
+ PendingOutboundPayment::Fulfilled { .. } => {},
+ }
}
- write_tlv_fields!(writer, {});
+ // Encode without retry info for 0.0.101 compatibility.
+ let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+ for (id, outbound) in pending_outbound_payments.iter() {
+ match outbound {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
+ },
+ _ => {},
+ }
+ }
+ write_tlv_fields!(writer, {
+ (1, pending_outbound_payments_no_retry, required),
+ (3, pending_outbound_payments, required),
+ });
Ok(())
}
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
/// is:
-/// 1) Deserialize all stored ChannelMonitors.
-/// 2) Deserialize the ChannelManager by filling in this struct and calling:
-/// <(BlockHash, ChannelManager)>::read(reader, args)
-/// This may result in closing some Channels if the ChannelMonitor is newer than the stored
-/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
-/// way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
-/// ChannelMonitor::get_funding_txo().
-/// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Disconnect/connect blocks on the ChannelManager.
-/// 6) Move the ChannelMonitors into your local chain::Watch.
+/// 1) Deserialize all stored [`ChannelMonitor`]s.
+/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
+/// `<(BlockHash, ChannelManager)>::read(reader, args)`
+/// This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
+/// [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted.
+/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
+/// same way you would handle a [`chain::Filter`] call using
+/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
+/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
+/// 5) Disconnect/connect blocks on the [`ChannelManager`].
+/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
+/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
+/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
+/// the next step.
+/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
+/// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
///
-/// Note that the ordering of #4-6 is not of importance, however all three must occur before you
-/// call any other methods on the newly-deserialized ChannelManager.
+/// Note that the ordering of #4-7 is not of importance, however all four must occur before you
+/// call any other methods on the newly-deserialized [`ChannelManager`].
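+///
+/// A minimal sketch of steps 1, 2 and 7, assuming a single previously-persisted
+/// [`ChannelMonitor`] and using hypothetical names (`monitor_bytes`, `manager_bytes`,
+/// `keys_manager`, `fee_estimator`, `chain_monitor`, `tx_broadcaster`, `logger`) for your
+/// persisted data and utility objects:
+///
+/// ```ignore
+/// let mut monitor_read = &monitor_bytes[..];
+/// let (_, mut monitor) =
+///     <(BlockHash, ChannelMonitor<InMemorySigner>)>::read(&mut monitor_read, &keys_manager)?;
+///
+/// let read_args = ChannelManagerReadArgs::new(&keys_manager, &fee_estimator, &chain_monitor,
+///     &tx_broadcaster, &logger, UserConfig::default(), vec![&mut monitor]);
+/// let mut manager_read = &manager_bytes[..];
+/// let (_, channel_manager) = <(BlockHash,
+///     ChannelManager<InMemorySigner, _, _, _, _, _>)>::read(&mut manager_read, read_args)?;
+///
+/// // ... steps 3-6: register outpoints, reconnect blocks and re-persist, then:
+/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor)?;
+/// ```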
///
/// Note that because some channels may be closed during deserialization, it is critical that you
/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
/// not force-close the same channels but consider them live), you may end up revoking a state for
/// which you've already broadcasted the transaction.
+///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut channel_closures = Vec::new();
for _ in 0..channel_count {
- let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
+ let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
// But if the channel is behind of the monitor, close the channel:
+ log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+ log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
failed_htlcs.append(&mut new_failed_htlcs);
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+ channel_closures.push(events::Event::ChannelClosed {
+ channel_id: channel.channel_id(),
+ user_channel_id: channel.get_user_id(),
+ reason: ClosureReason::OutdatedChannelManager
+ });
} else {
+ log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
if let Some(short_channel_id) = channel.get_short_channel_id() {
short_to_id.insert(short_channel_id, channel.channel_id());
}
for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
if !funding_txo_set.contains(funding_txo) {
+ log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
}
}
None => continue,
}
}
+ if forward_htlcs_count > 0 {
+ // If we have pending HTLCs to forward, assume we either dropped a
+ // `PendingHTLCsForwardable` or the user received it but never processed it as they
+ // shut down before the timer hit. Either way, set the time_forwardable to a small
+ // constant as enough time has likely passed that we should simply handle the forwards
+ // now, or at least after the user gets a chance to reconnect to our peers.
+ pending_events_read.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: Duration::from_secs(2),
+ });
+ }
let background_event_count: u64 = Readable::read(reader)?;
let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
}
}
- let pending_outbound_payments_count: u64 = Readable::read(reader)?;
- let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32));
- for _ in 0..pending_outbound_payments_count {
- if !pending_outbound_payments.insert(Readable::read(reader)?) {
- return Err(DecodeError::InvalidValue);
- }
+ let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
+ let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
+ HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+ for _ in 0..pending_outbound_payments_count_compat {
+ let session_priv = Readable::read(reader)?;
+ let payment = PendingOutboundPayment::Legacy {
+ session_privs: [session_priv].iter().cloned().collect()
+ };
+ if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
+ return Err(DecodeError::InvalidValue)
+ };
}
- read_tlv_fields!(reader, {});
+ // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
+ let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
+ let mut pending_outbound_payments = None;
+ read_tlv_fields!(reader, {
+ (1, pending_outbound_payments_no_retry, option),
+ (3, pending_outbound_payments, option),
+ });
+ if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
+ pending_outbound_payments = Some(pending_outbound_payments_compat);
+ } else if pending_outbound_payments.is_none() {
+ let mut outbounds = HashMap::new();
+ for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
+ outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
+ }
+ pending_outbound_payments = Some(outbounds);
+ } else {
+ // If we're tracking pending payments, ensure we haven't lost any by looking at the
+			// ChannelMonitor data for any channels for which we do not have authoritative state
+ // (i.e. those for which we just force-closed above or we otherwise don't have a
+ // corresponding `Channel` at all).
+ // This avoids several edge-cases where we would otherwise "forget" about pending
+ // payments which are still in-flight via their on-chain state.
+ // We only rebuild the pending payments map if we were most recently serialized by
+ // 0.0.102+
+ for (_, monitor) in args.channel_monitors {
+ if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+ for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
+ if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
+ if path.is_empty() {
+ log_error!(args.logger, "Got an empty path for a pending payment");
+ return Err(DecodeError::InvalidValue);
+ }
+ let path_amt = path.last().unwrap().fee_msat;
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes[..].copy_from_slice(&session_priv[..]);
+ match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
+ log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
+ if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
+ },
+ hash_map::Entry::Vacant(entry) => {
+ let path_fee = path.get_path_fees();
+ entry.insert(PendingOutboundPayment::Retryable {
+ session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
+ payment_hash: htlc.payment_hash,
+ payment_secret,
+ pending_amt_msat: path_amt,
+ pending_fee_msat: Some(path_fee),
+ total_msat: path_amt,
+ starting_block_height: best_block_height,
+ });
+ log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
+ path_amt, log_bytes!(htlc.payment_hash.0), log_bytes!(session_priv_bytes));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
+ if !channel_closures.is_empty() {
+ pending_events_read.append(&mut channel_closures);
+ }
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
pending_msg_events: Vec::new(),
}),
pending_inbound_payments: Mutex::new(pending_inbound_payments),
- pending_outbound_payments: Mutex::new(pending_outbound_payments),
+ pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
our_network_key: args.keys_manager.get_node_secret(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
#[cfg(test)]
mod tests {
- use ln::channelmanager::PersistenceNotifier;
- use std::sync::Arc;
- use core::sync::atomic::{AtomicBool, Ordering};
- use std::thread;
+ use bitcoin::hashes::Hash;
+ use bitcoin::hashes::sha256::Hash as Sha256;
use core::time::Duration;
- use ln::functional_test_utils::*;
+ use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+ use ln::channelmanager::{PaymentId, PaymentSendFailure};
use ln::features::InitFeatures;
+ use ln::functional_test_utils::*;
+ use ln::msgs;
use ln::msgs::ChannelMessageHandler;
+ use routing::router::{Payee, RouteParameters, find_route};
+ use util::errors::APIError;
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use util::test_utils;
+ #[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
+ use ln::channelmanager::PersistenceNotifier;
+ use sync::Arc;
+ use core::sync::atomic::{AtomicBool, Ordering};
+ use std::thread;
+
let persistence_notifier = Arc::new(PersistenceNotifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ // All nodes start with a persistable update pending as `create_network` connects each node
+ // with all other nodes to make most tests simpler.
+ assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
// We check that the channel info nodes have doesn't change too early, even though we try
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
+
+ #[test]
+ fn test_keysend_dup_hash_partial_mpp() {
+ // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
+ // expected.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+ // First, send a partial MPP payment.
+ let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
+ let payment_id = PaymentId([42; 32]);
+ // Use the utility function send_payment_along_path to send the payment with MPP data which
+ // indicates there are more HTLCs coming.
+ let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
+ nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+ // Next, send a keysend payment with the same payment_hash and make sure it fails.
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], our_payment_hash, true);
+
+ // Send the second half of the original MPP payment.
+ nodes[0].node.send_payment_along_path(&route.paths[0], &route.payee, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
+
+ // Claim the full MPP payment. Note that we can't use a test utility like
+ // claim_funds_along_route because the ordering of the messages causes the second half of the
+ // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
+ // lightning messages manually.
+ assert!(nodes[1].node.claim_funds(payment_preimage));
+ check_added_monitors!(nodes[1], 2);
+ let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+ check_added_monitors!(nodes[1], 1);
+ let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
+ check_added_monitors!(nodes[1], 1);
+ let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+ let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ check_added_monitors!(nodes[0], 1);
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+ check_added_monitors!(nodes[0], 1);
+
+ // Note that successful MPP payments will generate a single PaymentSent event upon the first
+ // path's success and a PaymentPathSuccessful event for each path's success.
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 3);
+ match events[0] {
+ Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
+ assert_eq!(Some(payment_id), *id);
+ assert_eq!(payment_preimage, *preimage);
+ assert_eq!(our_payment_hash, *hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
+ assert_eq!(payment_id, *actual_payment_id);
+ assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
+ assert_eq!(route.paths[0], *path);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[2] {
+ Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
+ assert_eq!(payment_id, *actual_payment_id);
+ assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
+ assert_eq!(route.paths[0], *path);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ #[test]
+ fn test_keysend_dup_payment_hash() {
+ // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
+ // outbound regular payment fails as expected.
+ // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
+ // fails as expected.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let scorer = test_utils::TestScorer::with_fixed_penalty(0);
+
+ // To start (1), send a regular payment but don't claim it.
+ let expected_route = [&nodes[1]];
+ let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+
+ // Next, attempt a keysend payment and make sure it fails.
+ let params = RouteParameters {
+ payee: Payee::for_keysend(expected_route.last().unwrap().node.get_our_node_id()),
+ final_value_msat: 100_000,
+ final_cltv_expiry_delta: TEST_FINAL_CLTV,
+ };
+ let route = find_route(
+ &nodes[0].node.get_our_node_id(), ¶ms, nodes[0].network_graph, None,
+ nodes[0].logger, &scorer
+ ).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], payment_hash, true);
+
+ // Finally, claim the original payment.
+ claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+ // To start (2), send a keysend payment but don't claim it.
+ let payment_preimage = PaymentPreimage([42; 32]);
+ let route = find_route(
+ &nodes[0].node.get_our_node_id(), &params, nodes[0].network_graph, None,
+ nodes[0].logger, &scorer
+ ).unwrap();
+ let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let event = events.pop().unwrap();
+ let path = vec![&nodes[1]];
+ pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+ // Next, attempt a regular payment and make sure it fails.
+ let payment_secret = PaymentSecret([43; 32]);
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
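+ // As in (1), two forwarding rounds fail the duplicate-hash HTLC back to nodes[0].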
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], payment_hash, true);
+
+ // Finally, succeed the keysend payment.
+ claim_payment(&nodes[0], &expected_route, payment_preimage);
+ }
+
+ #[test]
+ fn test_keysend_hash_mismatch() {
+ // Test that we fail as expected when we receive a keysend `update_add_htlc` msg whose keysend
+ // preimage doesn't match the msg's payment hash.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let params = RouteParameters {
+ payee: Payee::for_keysend(payee_pubkey),
+ final_value_msat: 10000,
+ final_cltv_expiry_delta: 40,
+ };
+ let network_graph = nodes[0].network_graph;
+ let first_hops = nodes[0].node.list_usable_channels();
+ let scorer = test_utils::TestScorer::with_fixed_penalty(0);
+ let route = find_route(
+ &payer_pubkey, &params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+ nodes[0].logger, &scorer
+ ).unwrap();
+
+ let test_preimage = PaymentPreimage([42; 32]);
+ let mismatch_payment_hash = PaymentHash([43; 32]);
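+ // Call the internal send_payment_internal directly so a payment hash that doesn't match the
+ // keysend preimage can reach the wire; send_spontaneous_payment would derive the hash from the
+ // preimage itself.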
+ let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+ }
+
+ #[test]
+ fn test_keysend_msg_with_secret_err() {
+ // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let params = RouteParameters {
+ payee: Payee::for_keysend(payee_pubkey),
+ final_value_msat: 10000,
+ final_cltv_expiry_delta: 40,
+ };
+ let network_graph = nodes[0].network_graph;
+ let first_hops = nodes[0].node.list_usable_channels();
+ let scorer = test_utils::TestScorer::with_fixed_penalty(0);
+ let route = find_route(
+ &payer_pubkey, &params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+ nodes[0].logger, &scorer
+ ).unwrap();
+
+ let test_preimage = PaymentPreimage([42; 32]);
+ let test_secret = PaymentSecret([43; 32]);
+ let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
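+ // Call the internal send_payment_internal directly so a payment secret can be attached to a
+ // keysend payment; the public send_spontaneous_payment API doesn't accept one.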
+ let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+ }
+
+ #[test]
+ fn test_multi_hop_missing_secret() {
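+ // Test that sending a multi-path payment without a payment secret fails as expected.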
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+
+ // Marshall an MPP route.
+ let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
+ let path = route.paths[0].clone();
+ route.paths.push(path);
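+ // Rewire the two copies so one path goes 0 -> 1 -> 3 (chans 1 and 3) and the other
+ // goes 0 -> 2 -> 3 (chans 2 and 4).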
+ route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+ route.paths[0][0].short_channel_id = chan_1_id;
+ route.paths[0][1].short_channel_id = chan_3_id;
+ route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+ route.paths[1][0].short_channel_id = chan_2_id;
+ route.paths[1][1].short_channel_id = chan_4_id;
+
+ match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
+ PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
+ assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) },
+ _ => panic!("unexpected error")
+ }
+ }
}
#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
pub mod bench {
use chain::Listen;
- use chain::chainmonitor::ChainMonitor;
- use chain::channelmonitor::Persist;
+ use chain::chainmonitor::{ChainMonitor, Persist};
use chain::keysinterface::{KeysManager, InMemorySigner};
use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
use ln::features::{InitFeatures, InvoiceFeatures};
use ln::functional_test_utils::*;
- use ln::msgs::ChannelMessageHandler;
+ use ln::msgs::{ChannelMessageHandler, Init};
use routing::network_graph::NetworkGraph;
- use routing::router::get_route;
+ use routing::router::{Payee, get_route};
+ use routing::scoring::Scorer;
use util::test_utils;
use util::config::UserConfig;
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::{Block, BlockHeader, Transaction, TxOut};
- use std::sync::{Arc, Mutex};
+ use sync::{Arc, Mutex};
use test::Bencher;
});
let node_b_holder = NodeHolder { node: &node_b };
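+ // Mark the two nodes as connected to each other before exchanging channel-open messages.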
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
macro_rules! send_payment {
($node_a: expr, $node_b: expr) => {
let usable_channels = $node_a.list_usable_channels();
- let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), Some(InvoiceFeatures::known()),
- Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap();
+ let payee = Payee::from_node_id($node_b.get_our_node_id())
+ .with_features(InvoiceFeatures::known());
+ let scorer = Scorer::with_fixed_penalty(0);
+ let route = get_route(&$node_a.get_our_node_id(), &payee, &dummy_graph,
+ Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer).unwrap();
let mut payment_preimage = PaymentPreimage([0; 32]);
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
- let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
+ let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
$node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());